Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 21 Jan 2018 18:41:48 +0000 (10:41 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 21 Jan 2018 18:41:48 +0000 (10:41 -0800)
Pull x86 kexec fix from Thomas Gleixner:
 "A single fix for the WBINVD issue introduced by the SME support which
  causes kexec fails on non AMD/SME capable CPUs. Issue WBINVD only when
  the CPU has SME and avoid doing so in a loop"

[ Side note: this patch fixes the problem, but it isn't entirely clear
  why it is required. The wbinvd should just work regardless, but there
  seems to be some system - as opposed to CPU - issue, since the wbinvd
  causes more problems later in the shutdown sequence, while wbinvd
  instructions issued when the system is still active are not
  problematic.

  Possibly some SMI or pending machine check issue on the affected system ]

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Rework wbinvd, hlt operation in stop_this_cpu()
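
As a rough illustration of the shape of that rework (a hedged sketch based on
the summary above, not the literal patch - the exact code and comments in
stop_this_cpu() may differ), the idea is to flush caches once, and only on
SME-capable processors, before entering the halt loop:

	/* Sketch of the reworked tail of stop_this_cpu(); boot_cpu_has(),
	 * X86_FEATURE_SME, native_wbinvd() and native_halt() are existing
	 * x86 helpers, the rest of the function body is omitted. */
	if (boot_cpu_has(X86_FEATURE_SME))
		native_wbinvd();	/* single cache flush, outside the loop */

	for (;;)
		native_halt();		/* halt without dirtying memory again */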

119 files changed:
Documentation/virtual/kvm/api.txt
MAINTAINERS
arch/alpha/kernel/sys_sio.c
arch/alpha/lib/ev6-memset.S
arch/arm/boot/dts/da850-lcdk.dts
arch/arm/boot/dts/kirkwood-openblocks_a7.dts
arch/arm/boot/dts/sun4i-a10.dtsi
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/configs/sunxi_defconfig
arch/arm/net/bpf_jit_32.c
arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
arch/arm64/kvm/handle_exit.c
arch/arm64/net/bpf_jit_comp.c
arch/ia64/include/asm/atomic.h
arch/mips/Kconfig
arch/mips/Kconfig.debug
arch/mips/ar7/platform.c
arch/mips/ath25/devices.c
arch/mips/kernel/mips-cm.c
arch/mips/lib/Makefile
arch/mips/lib/libgcc.h
arch/mips/lib/multi3.c [new file with mode: 0644]
arch/mips/mm/uasm-micromips.c
arch/mips/ralink/timer.c
arch/mips/rb532/Makefile
arch/mips/rb532/devices.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/hvcall.h
arch/powerpc/include/uapi/asm/kvm.h
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/xmon/xmon.c
arch/s390/include/asm/kvm_host.h
arch/s390/include/uapi/asm/kvm.h
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/vsie.c
arch/x86/kvm/x86.c
arch/x86/mm/mem_encrypt.c
drivers/ata/libata-core.c
drivers/bcma/Kconfig
drivers/gpio/gpio-mmio.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
drivers/i2c/i2c-core-base.c
drivers/i2c/i2c-core-smbus.c
drivers/input/misc/twl4030-vibra.c
drivers/input/misc/twl6040-vibra.c
drivers/input/mouse/alps.c
drivers/input/mouse/alps.h
drivers/input/mouse/synaptics.c
drivers/input/rmi4/rmi_driver.c
drivers/input/touchscreen/88pm860x-ts.c
drivers/input/touchscreen/of_touchscreen.c
drivers/md/dm-crypt.c
drivers/md/dm-integrity.c
drivers/md/dm-thin-metadata.c
drivers/md/persistent-data/dm-btree.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/fs_enet/fs_enet.h
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/ti/netcp_core.c
drivers/net/tun.c
drivers/net/usb/r8152.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
drivers/nvme/host/pci.c
drivers/phy/phy-core.c
drivers/scsi/libsas/sas_scsi_host.c
drivers/ssb/Kconfig
fs/proc/array.c
include/linux/compiler-gcc.h
include/uapi/linux/kvm.h
kernel/bpf/core.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup.c
kernel/irq/matrix.c
kernel/trace/ring_buffer.c
kernel/trace/trace_events.c
kernel/workqueue.c
mm/memory.c
mm/page_owner.c
net/can/af_can.c
net/core/filter.c
net/core/flow_dissector.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/netlink/af_netlink.c
net/sched/cls_bpf.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/wireless/nl80211.c
net/wireless/wext-compat.c
scripts/decodecode
scripts/gdb/linux/tasks.py
tools/testing/selftests/bpf/test_verifier.c
virt/kvm/arm/mmu.c
virt/kvm/arm/vgic/vgic-init.c
virt/kvm/arm/vgic/vgic-v4.c

index 57d3ee9..fc3ae95 100644 (file)
@@ -3403,6 +3403,52 @@ invalid, if invalid pages are written to (e.g. after the end of memory)
 or if no page table is present for the addresses (e.g. when using
 hugepages).
 
+4.108 KVM_PPC_GET_CPU_CHAR
+
+Capability: KVM_CAP_PPC_GET_CPU_CHAR
+Architectures: powerpc
+Type: vm ioctl
+Parameters: struct kvm_ppc_cpu_char (out)
+Returns: 0 on successful completion
+        -EFAULT if struct kvm_ppc_cpu_char cannot be written
+
+This ioctl gives userspace information about certain characteristics
+of the CPU relating to speculative execution of instructions and
+possible information leakage resulting from speculative execution (see
+CVE-2017-5715, CVE-2017-5753 and CVE-2017-5754).  The information is
+returned in struct kvm_ppc_cpu_char, which looks like this:
+
+struct kvm_ppc_cpu_char {
+       __u64   character;              /* characteristics of the CPU */
+       __u64   behaviour;              /* recommended software behaviour */
+       __u64   character_mask;         /* valid bits in character */
+       __u64   behaviour_mask;         /* valid bits in behaviour */
+};
+
+For extensibility, the character_mask and behaviour_mask fields
+indicate which bits of character and behaviour have been filled in by
+the kernel.  If the set of defined bits is extended in future then
+userspace will be able to tell whether it is running on a kernel that
+knows about the new bits.
+
+The character field describes attributes of the CPU which can help
+with preventing inadvertent information disclosure - specifically,
+whether there is an instruction to flash-invalidate the L1 data cache
+(ori 30,30,0 or mtspr SPRN_TRIG2,rN), whether the L1 data cache is set
+to a mode where entries can only be used by the thread that created
+them, whether the bcctr[l] instruction prevents speculation, and
+whether a speculation barrier instruction (ori 31,31,0) is provided.
+
+The behaviour field describes actions that software should take to
+prevent inadvertent information disclosure, and thus describes which
+vulnerabilities the hardware is subject to; specifically whether the
+L1 data cache should be flushed when returning to user mode from the
+kernel, and whether a speculation barrier should be placed between an
+array bounds check and the array access.
+
+These fields use the same bit definitions as the new
+H_GET_CPU_CHARACTERISTICS hypercall.
+
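A minimal userspace sketch of the ioctl described above (assuming a powerpc
host whose kernel headers already carry these definitions; error handling is
kept to a minimum):

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	struct kvm_ppc_cpu_char cc;
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);

	if (vm < 0 || ioctl(vm, KVM_PPC_GET_CPU_CHAR, &cc) < 0) {
		perror("KVM_PPC_GET_CPU_CHAR");
		return 1;
	}

	/* Only bits set in the *_mask fields are meaningful. */
	printf("character %#llx (valid %#llx)\n",
	       (unsigned long long)(cc.character & cc.character_mask),
	       (unsigned long long)cc.character_mask);
	printf("behaviour %#llx (valid %#llx)\n",
	       (unsigned long long)(cc.behaviour & cc.behaviour_mask),
	       (unsigned long long)cc.behaviour_mask);
	return 0;
}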
 5. The kvm_run structure
 ------------------------
 
index 1899480..e358141 100644 (file)
@@ -9085,6 +9085,7 @@ F:        drivers/usb/image/microtek.*
 
 MIPS
 M:     Ralf Baechle <ralf@linux-mips.org>
+M:     James Hogan <jhogan@kernel.org>
 L:     linux-mips@linux-mips.org
 W:     http://www.linux-mips.org/
 T:     git git://git.linux-mips.org/pub/scm/ralf/linux.git
index 37bd6d9..a6bdc1d 100644 (file)
@@ -102,6 +102,15 @@ sio_pci_route(void)
                                   alpha_mv.sys.sio.route_tab);
 }
 
+static bool sio_pci_dev_irq_needs_level(const struct pci_dev *dev)
+{
+       if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) &&
+           (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
+               return false;
+
+       return true;
+}
+
 static unsigned int __init
 sio_collect_irq_levels(void)
 {
@@ -110,8 +119,7 @@ sio_collect_irq_levels(void)
 
        /* Iterate through the devices, collecting IRQ levels.  */
        for_each_pci_dev(dev) {
-               if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) &&
-                   (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
+               if (!sio_pci_dev_irq_needs_level(dev))
                        continue;
 
                if (dev->irq)
@@ -120,8 +128,7 @@ sio_collect_irq_levels(void)
        return level_bits;
 }
 
-static void __init
-sio_fixup_irq_levels(unsigned int level_bits)
+static void __sio_fixup_irq_levels(unsigned int level_bits, bool reset)
 {
        unsigned int old_level_bits;
 
@@ -139,12 +146,21 @@ sio_fixup_irq_levels(unsigned int level_bits)
         */
        old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8);
 
-       level_bits |= (old_level_bits & 0x71ff);
+       if (reset)
+               old_level_bits &= 0x71ff;
+
+       level_bits |= old_level_bits;
 
        outb((level_bits >> 0) & 0xff, 0x4d0);
        outb((level_bits >> 8) & 0xff, 0x4d1);
 }
 
+static inline void
+sio_fixup_irq_levels(unsigned int level_bits)
+{
+       __sio_fixup_irq_levels(level_bits, true);
+}
+
 static inline int
 noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
@@ -181,7 +197,14 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
        const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5;
        int irq = COMMON_TABLE_LOOKUP, tmp;
        tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq);
-       return irq >= 0 ? tmp : -1;
+
+       irq = irq >= 0 ? tmp : -1;
+
+       /* Fixup IRQ level if an actual IRQ mapping is detected */
+       if (sio_pci_dev_irq_needs_level(dev) && irq >= 0)
+               __sio_fixup_irq_levels(1 << irq, false);
+
+       return irq;
 }
 
 static inline int
index 316a99a..1cfcfbb 100644 (file)
@@ -18,7 +18,7 @@
  * The algorithm for the leading and trailing quadwords remains the same,
  * however the loop has been unrolled to enable better memory throughput,
  * and the code has been replicated for each of the entry points: __memset
- * and __memsetw to permit better scheduling to eliminate the stalling
+ * and __memset16 to permit better scheduling to eliminate the stalling
  * encountered during the mask replication.
  * A future enhancement might be to put in a byte store loop for really
  * small (say < 32 bytes) memset()s.  Whether or not that change would be
@@ -34,7 +34,7 @@
        .globl memset
        .globl __memset
        .globl ___memset
-       .globl __memsetw
+       .globl __memset16
        .globl __constant_c_memset
 
        .ent ___memset
@@ -415,9 +415,9 @@ end:
         * to mask stalls.  Note that entry point names also had to change
         */
        .align 5
-       .ent __memsetw
+       .ent __memset16
 
-__memsetw:
+__memset16:
        .frame $30,0,$26,0
        .prologue 0
 
@@ -596,8 +596,8 @@ end_w:
        nop
        ret $31,($26),1         # L0 :
 
-       .end __memsetw
-       EXPORT_SYMBOL(__memsetw)
+       .end __memset16
+       EXPORT_SYMBOL(__memset16)
 
 memset = ___memset
 __memset = ___memset
index eed89e6..a1f4d6d 100644 (file)
                                        label = "u-boot env";
                                        reg = <0 0x020000>;
                                };
-                               partition@0x020000 {
+                               partition@20000 {
                                        /* The LCDK defaults to booting from this partition */
                                        label = "u-boot";
                                        reg = <0x020000 0x080000>;
                                };
-                               partition@0x0a0000 {
+                               partition@a0000 {
                                        label = "free space";
                                        reg = <0x0a0000 0>;
                                };
index cf2f524..27cc913 100644 (file)
@@ -53,7 +53,8 @@
                };
 
                pinctrl: pin-controller@10000 {
-                       pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header>;
+                       pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header
+                                    &pmx_gpio_header_gpo>;
                        pinctrl-names = "default";
 
                        pmx_uart0: pmx-uart0 {
                         * ground.
                         */
                        pmx_gpio_header: pmx-gpio-header {
-                               marvell,pins = "mpp17", "mpp7", "mpp29", "mpp28",
+                               marvell,pins = "mpp17", "mpp29", "mpp28",
                                               "mpp35", "mpp34", "mpp40";
                                marvell,function = "gpio";
                        };
 
+                       pmx_gpio_header_gpo: pxm-gpio-header-gpo {
+                               marvell,pins = "mpp7";
+                               marvell,function = "gpo";
+                       };
+
                        pmx_gpio_init: pmx-init {
                                marvell,pins = "mpp38";
                                marvell,function = "gpio";
index 5840f5c..4f2f2ee 100644 (file)
 
                                        be1_out_tcon0: endpoint@0 {
                                                reg = <0>;
-                                               remote-endpoint = <&tcon1_in_be0>;
+                                               remote-endpoint = <&tcon0_in_be1>;
                                        };
 
                                        be1_out_tcon1: endpoint@1 {
index 59655e4..bd0cd32 100644 (file)
 
                                        be1_out_tcon0: endpoint@0 {
                                                reg = <0>;
-                                               remote-endpoint = <&tcon1_in_be0>;
+                                               remote-endpoint = <&tcon0_in_be1>;
                                        };
 
                                        be1_out_tcon1: endpoint@1 {
index 5caaf97..df433ab 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_SMP=y
 CONFIG_NR_CPUS=8
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
+CONFIG_CMA=y
 CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_CPU_FREQ=y
@@ -33,6 +34,7 @@ CONFIG_CAN_SUN4I=y
 # CONFIG_WIRELESS is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_ATA=y
 CONFIG_AHCI_SUNXI=y
index c199990..323a4df 100644 (file)
 
 int bpf_jit_enable __read_mostly;
 
+/*
+ * eBPF prog stack layout:
+ *
+ *                         high
+ * original ARM_SP =>     +-----+
+ *                        |     | callee saved registers
+ *                        +-----+ <= (BPF_FP + SCRATCH_SIZE)
+ *                        | ... | eBPF JIT scratch space
+ * eBPF fp register =>    +-----+
+ *   (BPF_FP)             | ... | eBPF prog stack
+ *                        +-----+
+ *                        |RSVD | JIT scratchpad
+ * current ARM_SP =>      +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
+ *                        |     |
+ *                        | ... | Function call stack
+ *                        |     |
+ *                        +-----+
+ *                          low
+ *
+ * The callee saved registers depends on whether frame pointers are enabled.
+ * With frame pointers (to be compliant with the ABI):
+ *
+ *                                high
+ * original ARM_SP =>     +------------------+ \
+ *                        |        pc        | |
+ * current ARM_FP =>      +------------------+ } callee saved registers
+ *                        |r4-r8,r10,fp,ip,lr| |
+ *                        +------------------+ /
+ *                                low
+ *
+ * Without frame pointers:
+ *
+ *                                high
+ * original ARM_SP =>     +------------------+
+ *                        | r4-r8,r10,fp,lr  | callee saved registers
+ * current ARM_FP =>      +------------------+
+ *                                low
+ *
+ * When popping registers off the stack at the end of a BPF function, we
+ * reference them via the current ARM_FP register.
+ */
+#define CALLEE_MASK    (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
+                        1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R10 | \
+                        1 << ARM_FP)
+#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
+#define CALLEE_POP_MASK  (CALLEE_MASK | 1 << ARM_PC)
+
 #define STACK_OFFSET(k)        (k)
 #define TMP_REG_1      (MAX_BPF_JIT_REG + 0)   /* TEMP Register 1 */
 #define TMP_REG_2      (MAX_BPF_JIT_REG + 1)   /* TEMP Register 2 */
 #define TCALL_CNT      (MAX_BPF_JIT_REG + 2)   /* Tail Call Count */
 
-/* Flags used for JIT optimization */
-#define SEEN_CALL      (1 << 0)
-
 #define FLAG_IMM_OVERFLOW      (1 << 0)
 
 /*
@@ -95,7 +139,6 @@ static const u8 bpf2a32[][2] = {
  * idx                 :       index of current last JITed instruction.
  * prologue_bytes      :       bytes used in prologue.
  * epilogue_offset     :       offset of epilogue starting.
- * seen                        :       bit mask used for JIT optimization.
  * offsets             :       array of eBPF instruction offsets in
  *                             JITed code.
  * target              :       final JITed code.
@@ -110,7 +153,6 @@ struct jit_ctx {
        unsigned int idx;
        unsigned int prologue_bytes;
        unsigned int epilogue_offset;
-       u32 seen;
        u32 flags;
        u32 *offsets;
        u32 *target;
@@ -179,8 +221,13 @@ static void jit_fill_hole(void *area, unsigned int size)
                *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
 }
 
-/* Stack must be multiples of 16 Bytes */
-#define STACK_ALIGN(sz) (((sz) + 3) & ~3)
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
+/* EABI requires the stack to be aligned to 64-bit boundaries */
+#define STACK_ALIGNMENT        8
+#else
+/* Stack must be aligned to 32-bit boundaries */
+#define STACK_ALIGNMENT        4
+#endif
 
 /* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
  * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
@@ -194,7 +241,7 @@ static void jit_fill_hole(void *area, unsigned int size)
         + SCRATCH_SIZE + \
         + 4 /* extra for skb_copy_bits buffer */)
 
-#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
+#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT)
 
 /* Get the offset of eBPF REGISTERs stored on scratch space. */
 #define STACK_VAR(off) (STACK_SIZE-off-4)
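
A small self-contained illustration of the alignment change above (userspace
only; ALIGN() is written out here the way the kernel macro effectively
expands for a power-of-two alignment): the old STACK_ALIGN() only rounded the
stack size up to 4 bytes, while EABI wants it kept on 8-byte boundaries.

#include <assert.h>

#define OLD_STACK_ALIGN(sz)	(((sz) + 3) & ~3)
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	assert(OLD_STACK_ALIGN(44) == 44);	/* 4-byte aligned only */
	assert(ALIGN(44, 8) == 48);		/* rounded up to the EABI 8-byte boundary */
	return 0;
}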
@@ -285,16 +332,19 @@ static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx)
                emit_mov_i_no8m(rd, val, ctx);
 }
 
-static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
+static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx)
 {
-       ctx->seen |= SEEN_CALL;
-#if __LINUX_ARM_ARCH__ < 5
-       emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
-
        if (elf_hwcap & HWCAP_THUMB)
                emit(ARM_BX(tgt_reg), ctx);
        else
                emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
+}
+
+static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
+{
+#if __LINUX_ARM_ARCH__ < 5
+       emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
+       emit_bx_r(tgt_reg, ctx);
 #else
        emit(ARM_BLX_R(tgt_reg), ctx);
 #endif
@@ -354,7 +404,6 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
        }
 
        /* Call appropriate function */
-       ctx->seen |= SEEN_CALL;
        emit_mov_i(ARM_IP, op == BPF_DIV ?
                   (u32)jit_udiv32 : (u32)jit_mod32, ctx);
        emit_blx_r(ARM_IP, ctx);
@@ -620,8 +669,6 @@ static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk,
        /* Do LSH operation */
        emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
        emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
-       /* As we are using ARM_LR */
-       ctx->seen |= SEEN_CALL;
        emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx);
        emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx);
        emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx);
@@ -656,8 +703,6 @@ static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk,
        /* Do the ARSH operation */
        emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
        emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
-       /* As we are using ARM_LR */
-       ctx->seen |= SEEN_CALL;
        emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
        emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
        _emit(ARM_COND_MI, ARM_B(0), ctx);
@@ -692,8 +737,6 @@ static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk,
        /* Do LSH operation */
        emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
        emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
-       /* As we are using ARM_LR */
-       ctx->seen |= SEEN_CALL;
        emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
        emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
        emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx);
@@ -828,8 +871,6 @@ static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk,
        /* Do Multiplication */
        emit(ARM_MUL(ARM_IP, rd, rn), ctx);
        emit(ARM_MUL(ARM_LR, rm, rt), ctx);
-       /* As we are using ARM_LR */
-       ctx->seen |= SEEN_CALL;
        emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);
 
        emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx);
@@ -872,33 +913,53 @@ static inline void emit_str_r(const u8 dst, const u8 src, bool dstk,
 }
 
 /* dst = *(size*)(src + off) */
-static inline void emit_ldx_r(const u8 dst, const u8 src, bool dstk,
-                             const s32 off, struct jit_ctx *ctx, const u8 sz){
+static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
+                             s32 off, struct jit_ctx *ctx, const u8 sz){
        const u8 *tmp = bpf2a32[TMP_REG_1];
-       u8 rd = dstk ? tmp[1] : dst;
+       const u8 *rd = dstk ? tmp : dst;
        u8 rm = src;
+       s32 off_max;
 
-       if (off) {
+       if (sz == BPF_H)
+               off_max = 0xff;
+       else
+               off_max = 0xfff;
+
+       if (off < 0 || off > off_max) {
                emit_a32_mov_i(tmp[0], off, false, ctx);
                emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
                rm = tmp[0];
+               off = 0;
+       } else if (rd[1] == rm) {
+               emit(ARM_MOV_R(tmp[0], rm), ctx);
+               rm = tmp[0];
        }
        switch (sz) {
-       case BPF_W:
-               /* Load a Word */
-               emit(ARM_LDR_I(rd, rm, 0), ctx);
+       case BPF_B:
+               /* Load a Byte */
+               emit(ARM_LDRB_I(rd[1], rm, off), ctx);
+               emit_a32_mov_i(dst[0], 0, dstk, ctx);
                break;
        case BPF_H:
                /* Load a HalfWord */
-               emit(ARM_LDRH_I(rd, rm, 0), ctx);
+               emit(ARM_LDRH_I(rd[1], rm, off), ctx);
+               emit_a32_mov_i(dst[0], 0, dstk, ctx);
                break;
-       case BPF_B:
-               /* Load a Byte */
-               emit(ARM_LDRB_I(rd, rm, 0), ctx);
+       case BPF_W:
+               /* Load a Word */
+               emit(ARM_LDR_I(rd[1], rm, off), ctx);
+               emit_a32_mov_i(dst[0], 0, dstk, ctx);
+               break;
+       case BPF_DW:
+               /* Load a Double Word */
+               emit(ARM_LDR_I(rd[1], rm, off), ctx);
+               emit(ARM_LDR_I(rd[0], rm, off + 4), ctx);
                break;
        }
        if (dstk)
-               emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
+               emit(ARM_STR_I(rd[1], ARM_SP, STACK_VAR(dst[1])), ctx);
+       if (dstk && sz == BPF_DW)
+               emit(ARM_STR_I(rd[0], ARM_SP, STACK_VAR(dst[0])), ctx);
 }
 
 /* Arithmatic Operation */
@@ -906,7 +967,6 @@ static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
                             const u8 rn, struct jit_ctx *ctx, u8 op) {
        switch (op) {
        case BPF_JSET:
-               ctx->seen |= SEEN_CALL;
                emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
                emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
                emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
@@ -945,7 +1005,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
        const u8 *tcc = bpf2a32[TCALL_CNT];
        const int idx0 = ctx->idx;
 #define cur_offset (ctx->idx - idx0)
-#define jmp_offset (out_offset - (cur_offset))
+#define jmp_offset (out_offset - (cur_offset) - 2)
        u32 off, lo, hi;
 
        /* if (index >= array->map.max_entries)
@@ -956,7 +1016,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
        emit_a32_mov_i(tmp[1], off, false, ctx);
        emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
        emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx);
-       /* index (64 bit) */
+       /* index is 32-bit for arrays */
        emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
        /* index >= array->map.max_entries */
        emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx);
@@ -997,7 +1057,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
        emit_a32_mov_i(tmp2[1], off, false, ctx);
        emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx);
        emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
-       emit(ARM_BX(tmp[1]), ctx);
+       emit_bx_r(tmp[1], ctx);
 
        /* out: */
        if (out_offset == -1)
@@ -1070,54 +1130,22 @@ static void build_prologue(struct jit_ctx *ctx)
        const u8 r2 = bpf2a32[BPF_REG_1][1];
        const u8 r3 = bpf2a32[BPF_REG_1][0];
        const u8 r4 = bpf2a32[BPF_REG_6][1];
-       const u8 r5 = bpf2a32[BPF_REG_6][0];
-       const u8 r6 = bpf2a32[TMP_REG_1][1];
-       const u8 r7 = bpf2a32[TMP_REG_1][0];
-       const u8 r8 = bpf2a32[TMP_REG_2][1];
-       const u8 r10 = bpf2a32[TMP_REG_2][0];
        const u8 fplo = bpf2a32[BPF_REG_FP][1];
        const u8 fphi = bpf2a32[BPF_REG_FP][0];
-       const u8 sp = ARM_SP;
        const u8 *tcc = bpf2a32[TCALL_CNT];
 
-       u16 reg_set = 0;
-
-       /*
-        * eBPF prog stack layout
-        *
-        *                         high
-        * original ARM_SP =>     +-----+ eBPF prologue
-        *                        |FP/LR|
-        * current ARM_FP =>      +-----+
-        *                        | ... | callee saved registers
-        * eBPF fp register =>    +-----+ <= (BPF_FP)
-        *                        | ... | eBPF JIT scratch space
-        *                        |     | eBPF prog stack
-        *                        +-----+
-        *                        |RSVD | JIT scratchpad
-        * current A64_SP =>      +-----+ <= (BPF_FP - STACK_SIZE)
-        *                        |     |
-        *                        | ... | Function call stack
-        *                        |     |
-        *                        +-----+
-        *                          low
-        */
-
        /* Save callee saved registers. */
-       reg_set |= (1<<r4) | (1<<r5) | (1<<r6) | (1<<r7) | (1<<r8) | (1<<r10);
 #ifdef CONFIG_FRAME_POINTER
-       reg_set |= (1<<ARM_FP) | (1<<ARM_IP) | (1<<ARM_LR) | (1<<ARM_PC);
-       emit(ARM_MOV_R(ARM_IP, sp), ctx);
+       u16 reg_set = CALLEE_PUSH_MASK | 1 << ARM_IP | 1 << ARM_PC;
+       emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
        emit(ARM_PUSH(reg_set), ctx);
        emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
 #else
-       /* Check if call instruction exists in BPF body */
-       if (ctx->seen & SEEN_CALL)
-               reg_set |= (1<<ARM_LR);
-       emit(ARM_PUSH(reg_set), ctx);
+       emit(ARM_PUSH(CALLEE_PUSH_MASK), ctx);
+       emit(ARM_MOV_R(ARM_FP, ARM_SP), ctx);
 #endif
        /* Save frame pointer for later */
-       emit(ARM_SUB_I(ARM_IP, sp, SCRATCH_SIZE), ctx);
+       emit(ARM_SUB_I(ARM_IP, ARM_SP, SCRATCH_SIZE), ctx);
 
        ctx->stack_size = imm8m(STACK_SIZE);
 
@@ -1140,33 +1168,19 @@ static void build_prologue(struct jit_ctx *ctx)
        /* end of prologue */
 }
 
+/* restore callee saved registers. */
 static void build_epilogue(struct jit_ctx *ctx)
 {
-       const u8 r4 = bpf2a32[BPF_REG_6][1];
-       const u8 r5 = bpf2a32[BPF_REG_6][0];
-       const u8 r6 = bpf2a32[TMP_REG_1][1];
-       const u8 r7 = bpf2a32[TMP_REG_1][0];
-       const u8 r8 = bpf2a32[TMP_REG_2][1];
-       const u8 r10 = bpf2a32[TMP_REG_2][0];
-       u16 reg_set = 0;
-
-       /* unwind function call stack */
-       emit(ARM_ADD_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);
-
-       /* restore callee saved registers. */
-       reg_set |= (1<<r4) | (1<<r5) | (1<<r6) | (1<<r7) | (1<<r8) | (1<<r10);
 #ifdef CONFIG_FRAME_POINTER
-       /* the first instruction of the prologue was: mov ip, sp */
-       reg_set |= (1<<ARM_FP) | (1<<ARM_SP) | (1<<ARM_PC);
+       /* When using frame pointers, some additional registers need to
+        * be loaded. */
+       u16 reg_set = CALLEE_POP_MASK | 1 << ARM_SP;
+       emit(ARM_SUB_I(ARM_SP, ARM_FP, hweight16(reg_set) * 4), ctx);
        emit(ARM_LDM(ARM_SP, reg_set), ctx);
 #else
-       if (ctx->seen & SEEN_CALL)
-               reg_set |= (1<<ARM_PC);
        /* Restore callee saved registers. */
-       emit(ARM_POP(reg_set), ctx);
-       /* Return back to the callee function */
-       if (!(ctx->seen & SEEN_CALL))
-               emit(ARM_BX(ARM_LR), ctx);
+       emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx);
+       emit(ARM_POP(CALLEE_POP_MASK), ctx);
 #endif
 }
 
@@ -1394,8 +1408,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                        emit_rev32(rt, rt, ctx);
                        goto emit_bswap_uxt;
                case 64:
-                       /* Because of the usage of ARM_LR */
-                       ctx->seen |= SEEN_CALL;
                        emit_rev32(ARM_LR, rt, ctx);
                        emit_rev32(rt, rd, ctx);
                        emit(ARM_MOV_R(rd, ARM_LR), ctx);
@@ -1448,22 +1460,7 @@ exit:
                rn = sstk ? tmp2[1] : src_lo;
                if (sstk)
                        emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
-               switch (BPF_SIZE(code)) {
-               case BPF_W:
-                       /* Load a Word */
-               case BPF_H:
-                       /* Load a Half-Word */
-               case BPF_B:
-                       /* Load a Byte */
-                       emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_SIZE(code));
-                       emit_a32_mov_i(dst_hi, 0, dstk, ctx);
-                       break;
-               case BPF_DW:
-                       /* Load a double word */
-                       emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_W);
-                       emit_ldx_r(dst_hi, rn, dstk, off+4, ctx, BPF_W);
-                       break;
-               }
+               emit_ldx_r(dst, rn, dstk, off, ctx, BPF_SIZE(code));
                break;
        /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
        case BPF_LD | BPF_ABS | BPF_W:
index 7c9bdc7..9db1931 100644 (file)
@@ -66,6 +66,7 @@
                                     <&cpu1>,
                                     <&cpu2>,
                                     <&cpu3>;
+               interrupt-parent = <&intc>;
        };
 
        psci {
index e3b64d0..9c7724e 100644 (file)
                        cpm_ethernet: ethernet@0 {
                                compatible = "marvell,armada-7k-pp22";
                                reg = <0x0 0x100000>, <0x129000 0xb000>;
-                               clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>, <&cpm_clk 1 5>;
-                               clock-names = "pp_clk", "gop_clk", "mg_clk";
+                               clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>,
+                                        <&cpm_clk 1 5>, <&cpm_clk 1 18>;
+                               clock-names = "pp_clk", "gop_clk",
+                                             "mg_clk","axi_clk";
                                marvell,system-controller = <&cpm_syscon0>;
                                status = "disabled";
                                dma-coherent;
                                #size-cells = <0>;
                                compatible = "marvell,orion-mdio";
                                reg = <0x12a200 0x10>;
-                               clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>;
+                               clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>,
+                                        <&cpm_clk 1 6>, <&cpm_clk 1 18>;
                                status = "disabled";
                        };
 
                                compatible = "marvell,armada-cp110-sdhci";
                                reg = <0x780000 0x300>;
                                interrupts = <ICU_GRP_NSR 27 IRQ_TYPE_LEVEL_HIGH>;
-                               clock-names = "core";
-                               clocks = <&cpm_clk 1 4>;
+                               clock-names = "core","axi";
+                               clocks = <&cpm_clk 1 4>, <&cpm_clk 1 18>;
                                dma-coherent;
                                status = "disabled";
                        };
index 0d51096..87ac68b 100644 (file)
                        cps_ethernet: ethernet@0 {
                                compatible = "marvell,armada-7k-pp22";
                                reg = <0x0 0x100000>, <0x129000 0xb000>;
-                               clocks = <&cps_clk 1 3>, <&cps_clk 1 9>, <&cps_clk 1 5>;
-                               clock-names = "pp_clk", "gop_clk", "mg_clk";
+                               clocks = <&cps_clk 1 3>, <&cps_clk 1 9>,
+                                        <&cps_clk 1 5>, <&cps_clk 1 18>;
+                               clock-names = "pp_clk", "gop_clk",
+                                             "mg_clk", "axi_clk";
                                marvell,system-controller = <&cps_syscon0>;
                                status = "disabled";
                                dma-coherent;
                                #size-cells = <0>;
                                compatible = "marvell,orion-mdio";
                                reg = <0x12a200 0x10>;
-                               clocks = <&cps_clk 1 9>, <&cps_clk 1 5>;
+                               clocks = <&cps_clk 1 9>, <&cps_clk 1 5>,
+                                        <&cps_clk 1 6>, <&cps_clk 1 18>;
                                status = "disabled";
                        };
 
index 304203f..e60494f 100644 (file)
@@ -45,7 +45,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
        ret = kvm_psci_call(vcpu);
        if (ret < 0) {
-               kvm_inject_undefined(vcpu);
+               vcpu_set_reg(vcpu, 0, ~0UL);
                return 1;
        }
 
@@ -54,7 +54,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-       kvm_inject_undefined(vcpu);
+       vcpu_set_reg(vcpu, 0, ~0UL);
        return 1;
 }
 
index ba38d40..bb32f7f 100644 (file)
@@ -148,7 +148,8 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
 /* Stack must be multiples of 16B */
 #define STACK_ALIGN(sz) (((sz) + 15) & ~15)
 
-#define PROLOGUE_OFFSET 8
+/* Tail call offset to jump into */
+#define PROLOGUE_OFFSET 7
 
 static int build_prologue(struct jit_ctx *ctx)
 {
@@ -200,19 +201,19 @@ static int build_prologue(struct jit_ctx *ctx)
        /* Initialize tail_call_cnt */
        emit(A64_MOVZ(1, tcc, 0, 0), ctx);
 
-       /* 4 byte extra for skb_copy_bits buffer */
-       ctx->stack_size = prog->aux->stack_depth + 4;
-       ctx->stack_size = STACK_ALIGN(ctx->stack_size);
-
-       /* Set up function call stack */
-       emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
-
        cur_offset = ctx->idx - idx0;
        if (cur_offset != PROLOGUE_OFFSET) {
                pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
                            cur_offset, PROLOGUE_OFFSET);
                return -1;
        }
+
+       /* 4 byte extra for skb_copy_bits buffer */
+       ctx->stack_size = prog->aux->stack_depth + 4;
+       ctx->stack_size = STACK_ALIGN(ctx->stack_size);
+
+       /* Set up function call stack */
+       emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
        return 0;
 }
 
@@ -260,11 +261,12 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
        emit(A64_LDR64(prg, tmp, prg), ctx);
        emit(A64_CBZ(1, prg, jmp_offset), ctx);
 
-       /* goto *(prog->bpf_func + prologue_size); */
+       /* goto *(prog->bpf_func + prologue_offset); */
        off = offsetof(struct bpf_prog, bpf_func);
        emit_a64_mov_i64(tmp, off, ctx);
        emit(A64_LDR64(tmp, prg, tmp), ctx);
        emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
+       emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
        emit(A64_BR(tmp), ctx);
 
        /* out: */
index 28e02c9..762eeb0 100644 (file)
@@ -65,29 +65,30 @@ ia64_atomic_fetch_##op (int i, atomic_t *v)                         \
 ATOMIC_OPS(add, +)
 ATOMIC_OPS(sub, -)
 
-#define atomic_add_return(i,v)                                         \
+#ifdef __OPTIMIZE__
+#define __ia64_atomic_const(i) __builtin_constant_p(i) ?               \
+               ((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||       \
+                (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0
+
+#define atomic_add_return(i, v)                                                \
 ({                                                                     \
-       int __ia64_aar_i = (i);                                         \
-       (__builtin_constant_p(i)                                        \
-        && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
-            || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
-            || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
-            || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
-               ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
-               : ia64_atomic_add(__ia64_aar_i, v);                     \
+       int __i = (i);                                                  \
+       static const int __ia64_atomic_p = __ia64_atomic_const(i);      \
+       __ia64_atomic_p ? ia64_fetch_and_add(__i, &(v)->counter) :      \
+                               ia64_atomic_add(__i, v);                \
 })
 
-#define atomic_sub_return(i,v)                                         \
+#define atomic_sub_return(i, v)                                                \
 ({                                                                     \
-       int __ia64_asr_i = (i);                                         \
-       (__builtin_constant_p(i)                                        \
-        && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
-            || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
-            || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
-            || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
-               ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
-               : ia64_atomic_sub(__ia64_asr_i, v);                     \
+       int __i = (i);                                                  \
+       static const int __ia64_atomic_p = __ia64_atomic_const(i);      \
+       __ia64_atomic_p ? ia64_fetch_and_add(-__i, &(v)->counter) :     \
+                               ia64_atomic_sub(__i, v);                \
 })
+#else
+#define atomic_add_return(i, v)        ia64_atomic_add(i, v)
+#define atomic_sub_return(i, v)        ia64_atomic_sub(i, v)
+#endif
 
 #define atomic_fetch_add(i,v)                                          \
 ({                                                                     \
index 350a990..8e0b370 100644 (file)
@@ -259,6 +259,7 @@ config BCM47XX
        select LEDS_GPIO_REGISTER
        select BCM47XX_NVRAM
        select BCM47XX_SPROM
+       select BCM47XX_SSB if !BCM47XX_BCMA
        help
         Support for BCM47XX based boards
 
@@ -389,6 +390,7 @@ config LANTIQ
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_MIPS16
        select SYS_SUPPORTS_MULTITHREADING
+       select SYS_SUPPORTS_VPE_LOADER
        select SYS_HAS_EARLY_PRINTK
        select GPIOLIB
        select SWAP_IO_SPACE
@@ -516,6 +518,7 @@ config MIPS_MALTA
        select SYS_SUPPORTS_MIPS16
        select SYS_SUPPORTS_MULTITHREADING
        select SYS_SUPPORTS_SMARTMIPS
+       select SYS_SUPPORTS_VPE_LOADER
        select SYS_SUPPORTS_ZBOOT
        select SYS_SUPPORTS_RELOCATABLE
        select USE_OF
@@ -2281,9 +2284,16 @@ config MIPSR2_TO_R6_EMULATOR
          The only reason this is a build-time option is to save ~14K from the
          final kernel image.
 
+config SYS_SUPPORTS_VPE_LOADER
+       bool
+       depends on SYS_SUPPORTS_MULTITHREADING
+       help
+         Indicates that the platform supports the VPE loader, and provides
+         physical_memsize.
+
 config MIPS_VPE_LOADER
        bool "VPE loader support."
-       depends on SYS_SUPPORTS_MULTITHREADING && MODULES
+       depends on SYS_SUPPORTS_VPE_LOADER && MODULES
        select CPU_MIPSR2_IRQ_VI
        select CPU_MIPSR2_IRQ_EI
        select MIPS_MT
index 464af5e..0749c37 100644 (file)
@@ -124,30 +124,36 @@ config SCACHE_DEBUGFS
 
          If unsure, say N.
 
-menuconfig MIPS_CPS_NS16550
+menuconfig MIPS_CPS_NS16550_BOOL
        bool "CPS SMP NS16550 UART output"
        depends on MIPS_CPS
        help
          Output debug information via an ns16550 compatible UART if exceptions
          occur early in the boot process of a secondary core.
 
-if MIPS_CPS_NS16550
+if MIPS_CPS_NS16550_BOOL
+
+config MIPS_CPS_NS16550
+       def_bool MIPS_CPS_NS16550_BASE != 0
 
 config MIPS_CPS_NS16550_BASE
        hex "UART Base Address"
        default 0x1b0003f8 if MIPS_MALTA
+       default 0
        help
          The base address of the ns16550 compatible UART on which to output
          debug information from the early stages of core startup.
 
+         This is only used if non-zero.
+
 config MIPS_CPS_NS16550_SHIFT
        int "UART Register Shift"
-       default 0 if MIPS_MALTA
+       default 0
        help
          The number of bits to shift ns16550 register indices by in order to
          form their addresses. That is, log base 2 of the span between
          adjacent ns16550 registers in the system.
 
-endif # MIPS_CPS_NS16550
+endif # MIPS_CPS_NS16550_BOOL
 
 endmenu
index 4674f1e..e1675c2 100644 (file)
@@ -575,7 +575,7 @@ static int __init ar7_register_uarts(void)
        uart_port.type          = PORT_AR7;
        uart_port.uartclk       = clk_get_rate(bus_clk) / 2;
        uart_port.iotype        = UPIO_MEM32;
-       uart_port.flags         = UPF_FIXED_TYPE;
+       uart_port.flags         = UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF;
        uart_port.regshift      = 2;
 
        uart_port.line          = 0;
index e115634..301a902 100644 (file)
@@ -73,6 +73,7 @@ const char *get_system_type(void)
 
 void __init ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk)
 {
+#ifdef CONFIG_SERIAL_8250_CONSOLE
        struct uart_port s;
 
        memset(&s, 0, sizeof(s));
@@ -85,6 +86,7 @@ void __init ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk)
        s.uartclk = uartclk;
 
        early_serial_setup(&s);
+#endif /* CONFIG_SERIAL_8250_CONSOLE */
 }
 
 int __init ath25_add_wmac(int nr, u32 base, int irq)
index dd5567b..8f5bd04 100644 (file)
@@ -292,7 +292,6 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core,
                                  *this_cpu_ptr(&cm_core_lock_flags));
        } else {
                WARN_ON(cluster != 0);
-               WARN_ON(vp != 0);
                WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 
                /*
index 78c2aff..e84e126 100644 (file)
@@ -16,4 +16,5 @@ obj-$(CONFIG_CPU_R3000)               += r3k_dump_tlb.o
 obj-$(CONFIG_CPU_TX39XX)       += r3k_dump_tlb.o
 
 # libgcc-style stuff needed in the kernel
-obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o ucmpdi2.o
+obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o multi3.o \
+        ucmpdi2.o
index 28002ed..199a7f9 100644 (file)
@@ -10,10 +10,18 @@ typedef int word_type __attribute__ ((mode (__word__)));
 struct DWstruct {
        int high, low;
 };
+
+struct TWstruct {
+       long long high, low;
+};
 #elif defined(__LITTLE_ENDIAN)
 struct DWstruct {
        int low, high;
 };
+
+struct TWstruct {
+       long long low, high;
+};
 #else
 #error I feel sick.
 #endif
@@ -23,4 +31,13 @@ typedef union {
        long long ll;
 } DWunion;
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6)
+typedef int ti_type __attribute__((mode(TI)));
+
+typedef union {
+       struct TWstruct s;
+       ti_type ti;
+} TWunion;
+#endif
+
 #endif /* __ASM_LIBGCC_H */
diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c
new file mode 100644 (file)
index 0000000..111ad47
--- /dev/null
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+
+#include "libgcc.h"
+
+/*
+ * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
+ * specific case only we'll implement it here.
+ *
+ * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
+ */
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
+
+/* multiply 64-bit values, low 64-bits returned */
+static inline long long notrace dmulu(long long a, long long b)
+{
+       long long res;
+
+       asm ("dmulu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b));
+       return res;
+}
+
+/* multiply 64-bit unsigned values, high 64-bits of 128-bit result returned */
+static inline long long notrace dmuhu(long long a, long long b)
+{
+       long long res;
+
+       asm ("dmuhu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b));
+       return res;
+}
+
+/* multiply 128-bit values, low 128-bits returned */
+ti_type notrace __multi3(ti_type a, ti_type b)
+{
+       TWunion res, aa, bb;
+
+       aa.ti = a;
+       bb.ti = b;
+
+       /*
+        * a * b =           (a.lo * b.lo)
+        *         + 2^64  * (a.hi * b.lo + a.lo * b.hi)
+        *        [+ 2^128 * (a.hi * b.hi)]
+        */
+       res.s.low = dmulu(aa.s.low, bb.s.low);
+       res.s.high = dmuhu(aa.s.low, bb.s.low);
+       res.s.high += dmulu(aa.s.high, bb.s.low);
+       res.s.high += dmulu(aa.s.low, bb.s.high);
+
+       return res.ti;
+}
+EXPORT_SYMBOL(__multi3);
+
+#endif /* 64BIT && CPU_MIPSR6 && GCC7 */
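
A quick host-side check of the decomposition used in the helper above (a
hedged sketch, not kernel code: it needs a compiler with the __int128
extension, e.g. GCC or Clang on a 64-bit target, and uses plain C arithmetic
in place of the dmulu/dmuhu instructions):

#include <assert.h>
#include <stdint.h>

/* low 128 bits of a * b, built from 64-bit pieces as in __multi3() above */
static unsigned __int128 multi3_by_parts(unsigned __int128 a, unsigned __int128 b)
{
	uint64_t alo = (uint64_t)a, ahi = (uint64_t)(a >> 64);
	uint64_t blo = (uint64_t)b, bhi = (uint64_t)(b >> 64);
	unsigned __int128 res = (unsigned __int128)alo * blo;

	res += (unsigned __int128)(ahi * blo + alo * bhi) << 64;
	return res;
}

int main(void)
{
	unsigned __int128 a = ((unsigned __int128)0x0123456789abcdefULL << 64) | 7;
	unsigned __int128 b = ((unsigned __int128)42 << 64) | 0xfedcba9876543210ULL;

	assert(multi3_by_parts(a, b) == a * b);
	return 0;
}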
index cdb5a19..9bb6baa 100644 (file)
@@ -40,7 +40,7 @@
 
 #include "uasm.c"
 
-static const struct insn const insn_table_MM[insn_invalid] = {
+static const struct insn insn_table_MM[insn_invalid] = {
        [insn_addu]     = {M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD},
        [insn_addiu]    = {M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
        [insn_and]      = {M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD},
index d4469b2..4f46a45 100644 (file)
@@ -109,9 +109,9 @@ static int rt_timer_probe(struct platform_device *pdev)
        }
 
        rt->irq = platform_get_irq(pdev, 0);
-       if (!rt->irq) {
+       if (rt->irq < 0) {
                dev_err(&pdev->dev, "failed to load irq\n");
-               return -ENOENT;
+               return rt->irq;
        }
 
        rt->membase = devm_ioremap_resource(&pdev->dev, res);
index efdecdb..8186afc 100644 (file)
@@ -2,4 +2,6 @@
 # Makefile for the RB532 board specific parts of the kernel
 #
 
-obj-y   += irq.o time.o setup.o serial.o prom.o gpio.o devices.o
+obj-$(CONFIG_SERIAL_8250_CONSOLE) += serial.o
+
+obj-y   += irq.o time.o setup.o prom.o gpio.o devices.o
index 32ea3e6..354d258 100644 (file)
@@ -310,6 +310,8 @@ static int __init plat_setup_devices(void)
        return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs));
 }
 
+#ifdef CONFIG_NET
+
 static int __init setup_kmac(char *s)
 {
        printk(KERN_INFO "korina mac = %s\n", s);
@@ -322,4 +324,6 @@ static int __init setup_kmac(char *s)
 
 __setup("kmac=", setup_kmac);
 
+#endif /* CONFIG_NET */
+
 arch_initcall(plat_setup_devices);
index c51e6ce..2ed525a 100644 (file)
@@ -166,6 +166,7 @@ config PPC
        select GENERIC_CLOCKEVENTS_BROADCAST    if SMP
        select GENERIC_CMOS_UPDATE
        select GENERIC_CPU_AUTOPROBE
+       select GENERIC_CPU_VULNERABILITIES      if PPC_BOOK3S_64
        select GENERIC_IRQ_SHOW
        select GENERIC_IRQ_SHOW_LEVEL
        select GENERIC_SMP_IDLE_THREAD
index f046161..eca3f9c 100644 (file)
 #define PROC_TABLE_GTSE                0x01
 
 #ifndef __ASSEMBLY__
+#include <linux/types.h>
 
 /**
  * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments
index 61d6049..637b726 100644 (file)
@@ -443,6 +443,31 @@ struct kvm_ppc_rmmu_info {
        __u32   ap_encodings[8];
 };
 
+/* For KVM_PPC_GET_CPU_CHAR */
+struct kvm_ppc_cpu_char {
+       __u64   character;              /* characteristics of the CPU */
+       __u64   behaviour;              /* recommended software behaviour */
+       __u64   character_mask;         /* valid bits in character */
+       __u64   behaviour_mask;         /* valid bits in behaviour */
+};
+
+/*
+ * Values for character and character_mask.
+ * These are identical to the values used by H_GET_CPU_CHARACTERISTICS.
+ */
+#define KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31                (1ULL << 63)
+#define KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED     (1ULL << 62)
+#define KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30       (1ULL << 61)
+#define KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2       (1ULL << 60)
+#define KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV       (1ULL << 59)
+#define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED      (1ULL << 58)
+#define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF     (1ULL << 57)
+#define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS       (1ULL << 56)
+
+#define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY      (1ULL << 63)
+#define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR         (1ULL << 62)
+#define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR    (1ULL << 61)
+
 /* Per-vcpu XICS interrupt controller state */
 #define KVM_REG_PPC_ICP_STATE  (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
 
index 9d21354..8fd3a70 100644 (file)
@@ -242,14 +242,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        unsigned short maj;
        unsigned short min;
 
-       /* We only show online cpus: disable preempt (overzealous, I
-        * knew) to prevent cpu going down. */
-       preempt_disable();
-       if (!cpu_online(cpu_id)) {
-               preempt_enable();
-               return 0;
-       }
-
 #ifdef CONFIG_SMP
        pvr = per_cpu(cpu_pvr, cpu_id);
 #else
@@ -358,9 +350,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 #ifdef CONFIG_SMP
        seq_printf(m, "\n");
 #endif
-
-       preempt_enable();
-
        /* If this is the last cpu, print the summary */
        if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
                show_cpuinfo_summary(m);
index 491be41..e67413f 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/memory.h>
 #include <linux/nmi.h>
 
+#include <asm/debugfs.h>
 #include <asm/io.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -901,4 +902,41 @@ void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
        if (!no_rfi_flush)
                rfi_flush_enable(enable);
 }
+
+#ifdef CONFIG_DEBUG_FS
+static int rfi_flush_set(void *data, u64 val)
+{
+       if (val == 1)
+               rfi_flush_enable(true);
+       else if (val == 0)
+               rfi_flush_enable(false);
+       else
+               return -EINVAL;
+
+       return 0;
+}
+
+static int rfi_flush_get(void *data, u64 *val)
+{
+       *val = rfi_flush ? 1 : 0;
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
+
+static __init int rfi_flush_debugfs_init(void)
+{
+       debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
+       return 0;
+}
+device_initcall(rfi_flush_debugfs_init);
+#endif
+
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       if (rfi_flush)
+               return sprintf(buf, "Mitigation: RFI Flush\n");
+
+       return sprintf(buf, "Vulnerable\n");
+}
 #endif /* CONFIG_PPC_BOOK3S_64 */
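
A hedged userspace sketch of how the two knobs added above surface at
runtime (the sysfs path comes from the generic cpu_show_meltdown() hook; the
debugfs path assumes debugfs is mounted at the conventional location and that
powerpc_debugfs_root maps to "powerpc"; reading rfi_flush needs root):

#include <stdio.h>

static void show(const char *path)
{
	char buf[128];
	FILE *f = fopen(path, "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	if (f)
		fclose(f);
}

int main(void)
{
	show("/sys/devices/system/cpu/vulnerabilities/meltdown");
	show("/sys/kernel/debug/powerpc/rfi_flush");
	return 0;
}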
index 1915e86..0a7c887 100644 (file)
 #include <asm/iommu.h>
 #include <asm/switch_to.h>
 #include <asm/xive.h>
+#ifdef CONFIG_PPC_PSERIES
+#include <asm/hvcall.h>
+#include <asm/plpar_wrappers.h>
+#endif
 
 #include "timing.h"
 #include "irq.h"
@@ -548,6 +552,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 #ifdef CONFIG_KVM_XICS
        case KVM_CAP_IRQ_XICS:
 #endif
+       case KVM_CAP_PPC_GET_CPU_CHAR:
                r = 1;
                break;
 
@@ -1759,6 +1764,124 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
        return r;
 }
 
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * These functions check whether the underlying hardware is safe
+ * against attacks based on observing the effects of speculatively
+ * executed instructions, and whether it supplies instructions for
+ * use in workarounds.  The information comes from firmware, either
+ * via the device tree on powernv platforms or from an hcall on
+ * pseries platforms.
+ */
+#ifdef CONFIG_PPC_PSERIES
+static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
+{
+       struct h_cpu_char_result c;
+       unsigned long rc;
+
+       if (!machine_is(pseries))
+               return -ENOTTY;
+
+       rc = plpar_get_cpu_characteristics(&c);
+       if (rc == H_SUCCESS) {
+               cp->character = c.character;
+               cp->behaviour = c.behaviour;
+               cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
+                       KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
+                       KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
+                       KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
+                       KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
+                       KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
+                       KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
+                       KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+               cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
+                       KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
+                       KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+       }
+       return 0;
+}
+#else
+static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
+{
+       return -ENOTTY;
+}
+#endif
+
+static inline bool have_fw_feat(struct device_node *fw_features,
+                               const char *state, const char *name)
+{
+       struct device_node *np;
+       bool r = false;
+
+       np = of_get_child_by_name(fw_features, name);
+       if (np) {
+               r = of_property_read_bool(np, state);
+               of_node_put(np);
+       }
+       return r;
+}
+
+static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
+{
+       struct device_node *np, *fw_features;
+       int r;
+
+       memset(cp, 0, sizeof(*cp));
+       r = pseries_get_cpu_char(cp);
+       if (r != -ENOTTY)
+               return r;
+
+       np = of_find_node_by_name(NULL, "ibm,opal");
+       if (np) {
+               fw_features = of_get_child_by_name(np, "fw-features");
+               of_node_put(np);
+               if (!fw_features)
+                       return 0;
+               if (have_fw_feat(fw_features, "enabled",
+                                "inst-spec-barrier-ori31,31,0"))
+                       cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
+               if (have_fw_feat(fw_features, "enabled",
+                                "fw-bcctrl-serialized"))
+                       cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
+               if (have_fw_feat(fw_features, "enabled",
+                                "inst-l1d-flush-ori30,30,0"))
+                       cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
+               if (have_fw_feat(fw_features, "enabled",
+                                "inst-l1d-flush-trig2"))
+                       cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
+               if (have_fw_feat(fw_features, "enabled",
+                                "fw-l1d-thread-split"))
+                       cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
+               if (have_fw_feat(fw_features, "enabled",
+                                "fw-count-cache-disabled"))
+                       cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+               cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
+                       KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
+                       KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
+                       KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
+                       KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
+                       KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+
+               if (have_fw_feat(fw_features, "enabled",
+                                "speculation-policy-favor-security"))
+                       cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
+               if (!have_fw_feat(fw_features, "disabled",
+                                 "needs-l1d-flush-msr-pr-0-to-1"))
+                       cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
+               if (!have_fw_feat(fw_features, "disabled",
+                                 "needs-spec-barrier-for-bound-checks"))
+                       cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+               cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
+                       KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
+                       KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+
+               of_node_put(fw_features);
+       }
+
+       return 0;
+}
+#endif
+
 long kvm_arch_vm_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
 {
@@ -1861,6 +1984,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
                        r = -EFAULT;
                break;
        }
+       case KVM_PPC_GET_CPU_CHAR: {
+               struct kvm_ppc_cpu_char cpuchar;
+
+               r = kvmppc_get_cpu_char(&cpuchar);
+               if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
+                       r = -EFAULT;
+               break;
+       }
        default: {
                struct kvm *kvm = filp->private_data;
                r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
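
The KVM_PPC_GET_CPU_CHAR handler added above is a plain VM ioctl: on pseries the data comes from the H_GET_CPU_CHARACTERISTICS hcall (via plpar_get_cpu_characteristics(), declared elsewhere in this series), on powernv from the ibm,opal fw-features device-tree nodes walked in kvmppc_get_cpu_char(). A minimal userspace sketch, assuming a powerpc host with the UAPI headers from this series and a vmfd already obtained from KVM_CREATE_VM (error handling trimmed; query_cpu_char is a made-up name):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static void query_cpu_char(int vmfd)
    {
            struct kvm_ppc_cpu_char cc;

            if (ioctl(vmfd, KVM_PPC_GET_CPU_CHAR, &cc) < 0) {
                    perror("KVM_PPC_GET_CPU_CHAR");
                    return;
            }

            /* Bits outside the *_mask fields were not reported by the kernel. */
            if (cc.behaviour_mask & KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR)
                    printf("guest needs L1D flush on kernel->user: %s\n",
                           (cc.behaviour & KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR) ? "yes" : "no");
    }
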
index cab24f5..0ddc7ac 100644 (file)
@@ -2344,10 +2344,10 @@ static void dump_one_paca(int cpu)
        DUMP(p, kernel_toc, "lx");
        DUMP(p, kernelbase, "lx");
        DUMP(p, kernel_msr, "lx");
-       DUMP(p, emergency_sp, "p");
+       DUMP(p, emergency_sp, "px");
 #ifdef CONFIG_PPC_BOOK3S_64
-       DUMP(p, nmi_emergency_sp, "p");
-       DUMP(p, mc_emergency_sp, "p");
+       DUMP(p, nmi_emergency_sp, "px");
+       DUMP(p, mc_emergency_sp, "px");
        DUMP(p, in_nmi, "x");
        DUMP(p, in_mce, "x");
        DUMP(p, hmi_event_available, "x");
@@ -2375,17 +2375,21 @@ static void dump_one_paca(int cpu)
        DUMP(p, slb_cache_ptr, "x");
        for (i = 0; i < SLB_CACHE_ENTRIES; i++)
                printf(" slb_cache[%d]:        = 0x%016lx\n", i, p->slb_cache[i]);
+
+       DUMP(p, rfi_flush_fallback_area, "px");
+       DUMP(p, l1d_flush_congruence, "llx");
+       DUMP(p, l1d_flush_sets, "llx");
 #endif
        DUMP(p, dscr_default, "llx");
 #ifdef CONFIG_PPC_BOOK3E
-       DUMP(p, pgd, "p");
-       DUMP(p, kernel_pgd, "p");
-       DUMP(p, tcd_ptr, "p");
-       DUMP(p, mc_kstack, "p");
-       DUMP(p, crit_kstack, "p");
-       DUMP(p, dbg_kstack, "p");
+       DUMP(p, pgd, "px");
+       DUMP(p, kernel_pgd, "px");
+       DUMP(p, tcd_ptr, "px");
+       DUMP(p, mc_kstack, "px");
+       DUMP(p, crit_kstack, "px");
+       DUMP(p, dbg_kstack, "px");
 #endif
-       DUMP(p, __current, "p");
+       DUMP(p, __current, "px");
        DUMP(p, kstack, "lx");
        printf(" kstack_base          = 0x%016lx\n", p->kstack & ~(THREAD_SIZE - 1));
        DUMP(p, stab_rr, "lx");
@@ -2403,7 +2407,7 @@ static void dump_one_paca(int cpu)
 #endif
 
 #ifdef CONFIG_PPC_POWERNV
-       DUMP(p, core_idle_state_ptr, "p");
+       DUMP(p, core_idle_state_ptr, "px");
        DUMP(p, thread_idle_state, "x");
        DUMP(p, thread_mask, "x");
        DUMP(p, subcore_sibling_mask, "x");
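
The xmon hunks above only switch the format used by DUMP() for pointer-typed paca fields from "p" to "px": %p output is hashed, so a debugger dump needs %px to show the real addresses. Schematically (the DUMP() macro body itself is not part of this hunk):

    printf(" emergency_sp = 0x%p\n",  p->emergency_sp);   /* hashed value           */
    printf(" emergency_sp = 0x%px\n", p->emergency_sp);   /* actual kernel address  */
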
index e14f381..c1b0a9a 100644 (file)
@@ -207,7 +207,8 @@ struct kvm_s390_sie_block {
        __u16   ipa;                    /* 0x0056 */
        __u32   ipb;                    /* 0x0058 */
        __u32   scaoh;                  /* 0x005c */
-       __u8    reserved60;             /* 0x0060 */
+#define FPF_BPBC       0x20
+       __u8    fpf;                    /* 0x0060 */
 #define ECB_GS         0x40
 #define ECB_TE         0x10
 #define ECB_SRSI       0x04
index 38535a5..4cdaa55 100644 (file)
@@ -224,6 +224,7 @@ struct kvm_guest_debug_arch {
 #define KVM_SYNC_RICCB  (1UL << 7)
 #define KVM_SYNC_FPRS   (1UL << 8)
 #define KVM_SYNC_GSCB   (1UL << 9)
+#define KVM_SYNC_BPBC   (1UL << 10)
 /* length and alignment of the sdnx as a power of two */
 #define SDNXC 8
 #define SDNXL (1UL << SDNXC)
@@ -247,7 +248,9 @@ struct kvm_sync_regs {
        };
        __u8  reserved[512];    /* for future vector expansion */
        __u32 fpc;              /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
-       __u8 padding1[52];      /* riccb needs to be 64byte aligned */
+       __u8 bpbc : 1;          /* bp mode */
+       __u8 reserved2 : 7;
+       __u8 padding1[51];      /* riccb needs to be 64byte aligned */
        __u8 riccb[64];         /* runtime instrumentation controls block */
        __u8 padding2[192];     /* sdnx needs to be 256byte aligned */
        union {
index 2c93cbb..2598cf2 100644 (file)
@@ -421,6 +421,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_S390_GS:
                r = test_facility(133);
                break;
+       case KVM_CAP_S390_BPB:
+               r = test_facility(82);
+               break;
        default:
                r = 0;
        }
@@ -2198,6 +2201,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        kvm_s390_set_prefix(vcpu, 0);
        if (test_kvm_facility(vcpu->kvm, 64))
                vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
+       if (test_kvm_facility(vcpu->kvm, 82))
+               vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
        if (test_kvm_facility(vcpu->kvm, 133))
                vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
        /* fprs can be synchronized via vrs, even if the guest has no vx. With
@@ -2339,6 +2344,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
        current->thread.fpu.fpc = 0;
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.sie_block->pp = 0;
+       vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
@@ -3298,6 +3304,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
                vcpu->arch.gs_enabled = 1;
        }
+       if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
+           test_kvm_facility(vcpu->kvm, 82)) {
+               vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+               vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
+       }
        save_access_regs(vcpu->arch.host_acrs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        /* save host (userspace) fprs/vrs */
@@ -3344,6 +3355,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        kvm_run->s.regs.pft = vcpu->arch.pfault_token;
        kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
        kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
+       kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_access_regs(vcpu->arch.host_acrs);
        /* Save guest register state */
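
With the hunks above, userspace sees the guest's branch prediction blocking bit (bpbc) as a new synced register: the kernel sets KVM_SYNC_BPBC in kvm_valid_regs when facility 82 is present, mirrors FPF_BPBC into run->s.regs.bpbc on exit, and applies a dirty bpbc on the next KVM_RUN. A hedged userspace-side sketch (run is the mmap'ed struct kvm_run of a vcpu; everything except the UAPI names above is made up):

    static void set_guest_bpbc(struct kvm_run *run, int enable)
    {
            if (!(run->kvm_valid_regs & KVM_SYNC_BPBC))
                    return;                         /* facility 82 absent or older kernel */

            run->s.regs.bpbc = enable ? 1 : 0;
            run->kvm_dirty_regs |= KVM_SYNC_BPBC;   /* consumed by sync_regs() on next KVM_RUN */
    }
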
index 5d6ae03..7513483 100644 (file)
@@ -223,6 +223,12 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        memcpy(scb_o->gcr, scb_s->gcr, 128);
        scb_o->pp = scb_s->pp;
 
+       /* branch prediction */
+       if (test_kvm_facility(vcpu->kvm, 82)) {
+               scb_o->fpf &= ~FPF_BPBC;
+               scb_o->fpf |= scb_s->fpf & FPF_BPBC;
+       }
+
        /* interrupt intercept */
        switch (scb_s->icptcode) {
        case ICPT_PROGI:
@@ -265,6 +271,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        scb_s->ecb3 = 0;
        scb_s->ecd = 0;
        scb_s->fac = 0;
+       scb_s->fpf = 0;
 
        rc = prepare_cpuflags(vcpu, vsie_page);
        if (rc)
@@ -324,6 +331,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
                        prefix_unmapped(vsie_page);
                scb_s->ecb |= scb_o->ecb & ECB_TE;
        }
+       /* branch prediction */
+       if (test_kvm_facility(vcpu->kvm, 82))
+               scb_s->fpf |= scb_o->fpf & FPF_BPBC;
        /* SIMD */
        if (test_kvm_facility(vcpu->kvm, 129)) {
                scb_s->eca |= scb_o->eca & ECA_VX;
index 1cec2c6..c53298d 100644 (file)
@@ -7496,13 +7496,13 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
 
 int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
-       if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG_BIT)) {
+       if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
                /*
                 * When EFER.LME and CR0.PG are set, the processor is in
                 * 64-bit mode (though maybe in a 32-bit code segment).
                 * CR4.PAE and EFER.LMA must be set.
                 */
-               if (!(sregs->cr4 & X86_CR4_PAE_BIT)
+               if (!(sregs->cr4 & X86_CR4_PAE)
                    || !(sregs->efer & EFER_LMA))
                        return -EINVAL;
        } else {
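
The fix above swaps the *_BIT bit-index constants for the real mask constants; with the indices, the checks test unrelated low bits of CR0/CR4. For reference (values as defined in the x86 UAPI headers, quoted here only for illustration):

    /* X86_CR0_PG_BIT = 31, X86_CR0_PG  = 1UL << 31 = 0x80000000 */
    /* X86_CR4_PAE_BIT = 5, X86_CR4_PAE = 1UL << 5  = 0x00000020 */

    cr0 & X86_CR0_PG_BIT    /* == cr0 & 0x1f: tests PE/MP/EM/TS/ET, not paging   */
    cr0 & X86_CR0_PG        /* tests bit 31: paging enabled, what was intended   */
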
index 3ef362f..e1d61e8 100644 (file)
@@ -738,7 +738,7 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
        return total;
 }
 
-void __init sme_encrypt_kernel(struct boot_params *bp)
+void __init __nostackprotector sme_encrypt_kernel(struct boot_params *bp)
 {
        unsigned long workarea_start, workarea_end, workarea_len;
        unsigned long execute_start, execute_end, execute_len;
index 8193b38..3c09122 100644 (file)
@@ -4449,6 +4449,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
         * https://bugzilla.kernel.org/show_bug.cgi?id=121671
         */
        { "LITEON CX1-JB*-HP",  NULL,           ATA_HORKAGE_MAX_SEC_1024 },
+       { "LITEON EP1-*",       NULL,           ATA_HORKAGE_MAX_SEC_1024 },
 
        /* Devices we expect to fail diagnostics */
 
index 02d78f6..ba8acca 100644 (file)
@@ -55,7 +55,7 @@ config BCMA_DRIVER_PCI
 
 config BCMA_DRIVER_PCI_HOSTMODE
        bool "Driver for PCI core working in hostmode"
-       depends on MIPS && BCMA_DRIVER_PCI
+       depends on MIPS && BCMA_DRIVER_PCI && PCI_DRIVERS_LEGACY
        help
          PCI core hostmode operation (external PCI bus).
 
index f9042bc..7b14d62 100644 (file)
@@ -152,14 +152,13 @@ static int bgpio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
 {
        unsigned long get_mask = 0;
        unsigned long set_mask = 0;
-       int bit = 0;
 
-       while ((bit = find_next_bit(mask, gc->ngpio, bit)) != gc->ngpio) {
-               if (gc->bgpio_dir & BIT(bit))
-                       set_mask |= BIT(bit);
-               else
-                       get_mask |= BIT(bit);
-       }
+       /* Make sure we first clear any bits that are zero when we read the register */
+       *bits &= ~*mask;
+
+       /* Exploit the fact that we know which directions are set */
+       set_mask = *mask & gc->bgpio_dir;
+       get_mask = *mask & ~gc->bgpio_dir;
 
        if (set_mask)
                *bits |= gc->read_reg(gc->reg_set) & set_mask;
@@ -176,13 +175,13 @@ static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
 
 /*
  * This only works if the bits in the GPIO register are in native endianness.
- * It is dirt simple and fast in this case. (Also the most common case.)
  */
 static int bgpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
                              unsigned long *bits)
 {
-
-       *bits = gc->read_reg(gc->reg_dat) & *mask;
+       /* Make sure we first clear any bits that are zero when we read the register */
+       *bits &= ~*mask;
+       *bits |= gc->read_reg(gc->reg_dat) & *mask;
        return 0;
 }
 
@@ -196,9 +195,12 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
        unsigned long val;
        int bit;
 
+       /* Make sure we first clear any bits that are zero when we read the register */
+       *bits &= ~*mask;
+
        /* Create a mirrored mask */
-       bit = 0;
-       while ((bit = find_next_bit(mask, gc->ngpio, bit)) != gc->ngpio)
+       bit = -1;
+       while ((bit = find_next_bit(mask, gc->ngpio, bit + 1)) < gc->ngpio)
                readmask |= bgpio_line2mask(gc, bit);
 
        /* Read the register */
@@ -208,8 +210,8 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
         * Mirror the result into the "bits" result, this will give line 0
         * in bit 0 ... line 31 in bit 31 for a 32bit register.
         */
-       bit = 0;
-       while ((bit = find_next_bit(&val, gc->ngpio, bit)) != gc->ngpio)
+       bit = -1;
+       while ((bit = find_next_bit(&val, gc->ngpio, bit + 1)) < gc->ngpio)
                *bits |= bgpio_line2mask(gc, bit);
 
        return 0;
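
The rewritten helpers above replace the per-bit loop with two mask operations and make every get_multiple path clear the requested bits in *bits first, so lines that read back as 0 are reported as 0 instead of keeping stale values. A worked example of the direction split (register width and values invented for illustration):

    /*
     * ngpio = 8, gc->bgpio_dir = 0x0c (lines 2-3 configured as outputs),
     * caller asks for *mask = 0x0f (lines 0-3):
     *
     *   set_mask = *mask &  gc->bgpio_dir;   -> 0x0c, read back from reg_set
     *   get_mask = *mask & ~gc->bgpio_dir;   -> 0x03, read from reg_dat
     *
     * *bits &= ~*mask clears lines 0-3 up front, then the two register reads
     * OR in only the bits that are actually set in hardware.
     */
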
index 123585e..50f8443 100644 (file)
@@ -1211,23 +1211,6 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
             pipe_name(pipe));
 }
 
-static void assert_cursor(struct drm_i915_private *dev_priv,
-                         enum pipe pipe, bool state)
-{
-       bool cur_state;
-
-       if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
-               cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
-       else
-               cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
-
-       I915_STATE_WARN(cur_state != state,
-            "cursor on pipe %c assertion failure (expected %s, current %s)\n",
-                       pipe_name(pipe), onoff(state), onoff(cur_state));
-}
-#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
-#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
-
 void assert_pipe(struct drm_i915_private *dev_priv,
                 enum pipe pipe, bool state)
 {
@@ -1255,77 +1238,25 @@ void assert_pipe(struct drm_i915_private *dev_priv,
                        pipe_name(pipe), onoff(state), onoff(cur_state));
 }
 
-static void assert_plane(struct drm_i915_private *dev_priv,
-                        enum plane plane, bool state)
+static void assert_plane(struct intel_plane *plane, bool state)
 {
-       u32 val;
-       bool cur_state;
+       bool cur_state = plane->get_hw_state(plane);
 
-       val = I915_READ(DSPCNTR(plane));
-       cur_state = !!(val & DISPLAY_PLANE_ENABLE);
        I915_STATE_WARN(cur_state != state,
-            "plane %c assertion failure (expected %s, current %s)\n",
-                       plane_name(plane), onoff(state), onoff(cur_state));
+                       "%s assertion failure (expected %s, current %s)\n",
+                       plane->base.name, onoff(state), onoff(cur_state));
 }
 
-#define assert_plane_enabled(d, p) assert_plane(d, p, true)
-#define assert_plane_disabled(d, p) assert_plane(d, p, false)
-
-static void assert_planes_disabled(struct drm_i915_private *dev_priv,
-                                  enum pipe pipe)
-{
-       int i;
-
-       /* Primary planes are fixed to pipes on gen4+ */
-       if (INTEL_GEN(dev_priv) >= 4) {
-               u32 val = I915_READ(DSPCNTR(pipe));
-               I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
-                    "plane %c assertion failure, should be disabled but not\n",
-                    plane_name(pipe));
-               return;
-       }
+#define assert_plane_enabled(p) assert_plane(p, true)
+#define assert_plane_disabled(p) assert_plane(p, false)
 
-       /* Need to check both planes against the pipe */
-       for_each_pipe(dev_priv, i) {
-               u32 val = I915_READ(DSPCNTR(i));
-               enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
-                       DISPPLANE_SEL_PIPE_SHIFT;
-               I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
-                    "plane %c assertion failure, should be off on pipe %c but is still active\n",
-                    plane_name(i), pipe_name(pipe));
-       }
-}
-
-static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
-                                   enum pipe pipe)
+static void assert_planes_disabled(struct intel_crtc *crtc)
 {
-       int sprite;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_plane *plane;
 
-       if (INTEL_GEN(dev_priv) >= 9) {
-               for_each_sprite(dev_priv, pipe, sprite) {
-                       u32 val = I915_READ(PLANE_CTL(pipe, sprite));
-                       I915_STATE_WARN(val & PLANE_CTL_ENABLE,
-                            "plane %d assertion failure, should be off on pipe %c but is still active\n",
-                            sprite, pipe_name(pipe));
-               }
-       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-               for_each_sprite(dev_priv, pipe, sprite) {
-                       u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite));
-                       I915_STATE_WARN(val & SP_ENABLE,
-                            "sprite %c assertion failure, should be off on pipe %c but is still active\n",
-                            sprite_name(pipe, sprite), pipe_name(pipe));
-               }
-       } else if (INTEL_GEN(dev_priv) >= 7) {
-               u32 val = I915_READ(SPRCTL(pipe));
-               I915_STATE_WARN(val & SPRITE_ENABLE,
-                    "sprite %c assertion failure, should be off on pipe %c but is still active\n",
-                    plane_name(pipe), pipe_name(pipe));
-       } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
-               u32 val = I915_READ(DVSCNTR(pipe));
-               I915_STATE_WARN(val & DVS_ENABLE,
-                    "sprite %c assertion failure, should be off on pipe %c but is still active\n",
-                    plane_name(pipe), pipe_name(pipe));
-       }
+       for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
+               assert_plane_disabled(plane);
 }
 
 static void assert_vblank_disabled(struct drm_crtc *crtc)
@@ -1918,9 +1849,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
 
        DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
 
-       assert_planes_disabled(dev_priv, pipe);
-       assert_cursor_disabled(dev_priv, pipe);
-       assert_sprites_disabled(dev_priv, pipe);
+       assert_planes_disabled(crtc);
 
        /*
         * A pipe without a PLL won't actually be able to drive bits from
@@ -1989,9 +1918,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
         * Make sure planes won't keep trying to pump pixels to us,
         * or we might hang the display.
         */
-       assert_planes_disabled(dev_priv, pipe);
-       assert_cursor_disabled(dev_priv, pipe);
-       assert_sprites_disabled(dev_priv, pipe);
+       assert_planes_disabled(crtc);
 
        reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
@@ -2820,6 +2747,23 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
                      crtc_state->active_planes);
 }
 
+static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
+                                        struct intel_plane *plane)
+{
+       struct intel_crtc_state *crtc_state =
+               to_intel_crtc_state(crtc->base.state);
+       struct intel_plane_state *plane_state =
+               to_intel_plane_state(plane->base.state);
+
+       intel_set_plane_visible(crtc_state, plane_state, false);
+
+       if (plane->id == PLANE_PRIMARY)
+               intel_pre_disable_primary_noatomic(&crtc->base);
+
+       trace_intel_disable_plane(&plane->base, crtc);
+       plane->disable_plane(plane, crtc);
+}
+
 static void
 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                             struct intel_initial_plane_config *plane_config)
@@ -2877,12 +2821,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
         * simplest solution is to just disable the primary plane now and
         * pretend the BIOS never had it enabled.
         */
-       intel_set_plane_visible(to_intel_crtc_state(crtc_state),
-                               to_intel_plane_state(plane_state),
-                               false);
-       intel_pre_disable_primary_noatomic(&intel_crtc->base);
-       trace_intel_disable_plane(primary, intel_crtc);
-       intel_plane->disable_plane(intel_plane, intel_crtc);
+       intel_plane_disable_noatomic(intel_crtc, intel_plane);
 
        return;
 
@@ -3385,6 +3324,31 @@ static void i9xx_disable_primary_plane(struct intel_plane *primary,
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
+static bool i9xx_plane_get_hw_state(struct intel_plane *primary)
+{
+
+       struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
+       enum intel_display_power_domain power_domain;
+       enum plane plane = primary->plane;
+       enum pipe pipe = primary->pipe;
+       bool ret;
+
+       /*
+        * Not 100% correct for planes that can move between pipes,
+        * but that's only the case for gen2-4 which don't have any
+        * display power wells.
+        */
+       power_domain = POWER_DOMAIN_PIPE(pipe);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+               return false;
+
+       ret = I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE;
+
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
+}
+
 static u32
 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
 {
@@ -4866,7 +4830,8 @@ void hsw_enable_ips(struct intel_crtc *crtc)
         * a vblank wait.
         */
 
-       assert_plane_enabled(dev_priv, crtc->plane);
+       assert_plane_enabled(to_intel_plane(crtc->base.primary));
+
        if (IS_BROADWELL(dev_priv)) {
                mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
@@ -4899,7 +4864,8 @@ void hsw_disable_ips(struct intel_crtc *crtc)
        if (!crtc->config->ips_enabled)
                return;
 
-       assert_plane_enabled(dev_priv, crtc->plane);
+       assert_plane_enabled(to_intel_plane(crtc->base.primary));
+
        if (IS_BROADWELL(dev_priv)) {
                mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
@@ -5899,6 +5865,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        enum intel_display_power_domain domain;
+       struct intel_plane *plane;
        u64 domains;
        struct drm_atomic_state *state;
        struct intel_crtc_state *crtc_state;
@@ -5907,11 +5874,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
        if (!intel_crtc->active)
                return;
 
-       if (crtc->primary->state->visible) {
-               intel_pre_disable_primary_noatomic(crtc);
+       for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
+               const struct intel_plane_state *plane_state =
+                       to_intel_plane_state(plane->base.state);
 
-               intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
-               crtc->primary->state->visible = false;
+               if (plane_state->base.visible)
+                       intel_plane_disable_noatomic(intel_crtc, plane);
        }
 
        state = drm_atomic_state_alloc(crtc->dev);
@@ -9477,6 +9445,23 @@ static void i845_disable_cursor(struct intel_plane *plane,
        i845_update_cursor(plane, NULL, NULL);
 }
 
+static bool i845_cursor_get_hw_state(struct intel_plane *plane)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum intel_display_power_domain power_domain;
+       bool ret;
+
+       power_domain = POWER_DOMAIN_PIPE(PIPE_A);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+               return false;
+
+       ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
+}
+
 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
                           const struct intel_plane_state *plane_state)
 {
@@ -9670,6 +9655,28 @@ static void i9xx_disable_cursor(struct intel_plane *plane,
        i9xx_update_cursor(plane, NULL, NULL);
 }
 
+static bool i9xx_cursor_get_hw_state(struct intel_plane *plane)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum intel_display_power_domain power_domain;
+       enum pipe pipe = plane->pipe;
+       bool ret;
+
+       /*
+        * Not 100% correct for planes that can move between pipes,
+        * but that's only the case for gen2-3 which don't have any
+        * display power wells.
+        */
+       power_domain = POWER_DOMAIN_PIPE(pipe);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+               return false;
+
+       ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
+}
 
 /* VESA 640x480x72Hz mode to set on the pipe */
 static const struct drm_display_mode load_detect_mode = {
@@ -13205,6 +13212,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 
                primary->update_plane = skl_update_plane;
                primary->disable_plane = skl_disable_plane;
+               primary->get_hw_state = skl_plane_get_hw_state;
        } else if (INTEL_GEN(dev_priv) >= 9) {
                intel_primary_formats = skl_primary_formats;
                num_formats = ARRAY_SIZE(skl_primary_formats);
@@ -13215,6 +13223,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 
                primary->update_plane = skl_update_plane;
                primary->disable_plane = skl_disable_plane;
+               primary->get_hw_state = skl_plane_get_hw_state;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                intel_primary_formats = i965_primary_formats;
                num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -13222,6 +13231,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 
                primary->update_plane = i9xx_update_primary_plane;
                primary->disable_plane = i9xx_disable_primary_plane;
+               primary->get_hw_state = i9xx_plane_get_hw_state;
        } else {
                intel_primary_formats = i8xx_primary_formats;
                num_formats = ARRAY_SIZE(i8xx_primary_formats);
@@ -13229,6 +13239,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 
                primary->update_plane = i9xx_update_primary_plane;
                primary->disable_plane = i9xx_disable_primary_plane;
+               primary->get_hw_state = i9xx_plane_get_hw_state;
        }
 
        if (INTEL_GEN(dev_priv) >= 9)
@@ -13318,10 +13329,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
        if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
                cursor->update_plane = i845_update_cursor;
                cursor->disable_plane = i845_disable_cursor;
+               cursor->get_hw_state = i845_cursor_get_hw_state;
                cursor->check_plane = i845_check_cursor;
        } else {
                cursor->update_plane = i9xx_update_cursor;
                cursor->disable_plane = i9xx_disable_cursor;
+               cursor->get_hw_state = i9xx_cursor_get_hw_state;
                cursor->check_plane = i9xx_check_cursor;
        }
 
@@ -14671,8 +14684,11 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
        DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
                      pipe_name(pipe));
 
-       assert_plane_disabled(dev_priv, PLANE_A);
-       assert_plane_disabled(dev_priv, PLANE_B);
+       WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
+       WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
+       WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
+       WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE);
+       WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE);
 
        I915_WRITE(PIPECONF(pipe), 0);
        POSTING_READ(PIPECONF(pipe));
@@ -14683,22 +14699,36 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
        POSTING_READ(DPLL(pipe));
 }
 
-static bool
-intel_check_plane_mapping(struct intel_crtc *crtc)
+static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
+                                  struct intel_plane *primary)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       u32 val;
+       enum plane plane = primary->plane;
+       u32 val = I915_READ(DSPCNTR(plane));
 
-       if (INTEL_INFO(dev_priv)->num_pipes == 1)
-               return true;
+       return (val & DISPLAY_PLANE_ENABLE) == 0 ||
+               (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe);
+}
 
-       val = I915_READ(DSPCNTR(!crtc->plane));
+static void
+intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
+{
+       struct intel_crtc *crtc;
 
-       if ((val & DISPLAY_PLANE_ENABLE) &&
-           (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
-               return false;
+       if (INTEL_GEN(dev_priv) >= 4)
+               return;
 
-       return true;
+       for_each_intel_crtc(&dev_priv->drm, crtc) {
+               struct intel_plane *plane =
+                       to_intel_plane(crtc->base.primary);
+
+               if (intel_plane_mapping_ok(crtc, plane))
+                       continue;
+
+               DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
+                             plane->base.name);
+               intel_plane_disable_noatomic(crtc, plane);
+       }
 }
 
 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
@@ -14754,33 +14784,15 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
 
                /* Disable everything but the primary plane */
                for_each_intel_plane_on_crtc(dev, crtc, plane) {
-                       if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
-                               continue;
+                       const struct intel_plane_state *plane_state =
+                               to_intel_plane_state(plane->base.state);
 
-                       trace_intel_disable_plane(&plane->base, crtc);
-                       plane->disable_plane(plane, crtc);
+                       if (plane_state->base.visible &&
+                           plane->base.type != DRM_PLANE_TYPE_PRIMARY)
+                               intel_plane_disable_noatomic(crtc, plane);
                }
        }
 
-       /* We need to sanitize the plane -> pipe mapping first because this will
-        * disable the crtc (and hence change the state) if it is wrong. Note
-        * that gen4+ has a fixed plane -> pipe mapping.  */
-       if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) {
-               bool plane;
-
-               DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
-                             crtc->base.base.id, crtc->base.name);
-
-               /* Pipe has the wrong plane attached and the plane is active.
-                * Temporarily change the plane mapping and disable everything
-                * ...  */
-               plane = crtc->plane;
-               crtc->base.primary->state->visible = true;
-               crtc->plane = !plane;
-               intel_crtc_disable_noatomic(&crtc->base, ctx);
-               crtc->plane = plane;
-       }
-
        /* Adjust the state of the output pipe according to whether we
         * have active connectors/encoders. */
        if (crtc->active && !intel_crtc_has_encoders(crtc))
@@ -14885,24 +14897,21 @@ void i915_redisable_vga(struct drm_i915_private *dev_priv)
        intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
 }
 
-static bool primary_get_hw_state(struct intel_plane *plane)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-
-       return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
-}
-
 /* FIXME read out full plane state for all planes */
 static void readout_plane_state(struct intel_crtc *crtc)
 {
-       struct intel_plane *primary = to_intel_plane(crtc->base.primary);
-       bool visible;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_crtc_state *crtc_state =
+               to_intel_crtc_state(crtc->base.state);
+       struct intel_plane *plane;
 
-       visible = crtc->active && primary_get_hw_state(primary);
+       for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+               struct intel_plane_state *plane_state =
+                       to_intel_plane_state(plane->base.state);
+               bool visible = plane->get_hw_state(plane);
 
-       intel_set_plane_visible(to_intel_crtc_state(crtc->base.state),
-                               to_intel_plane_state(primary->base.state),
-                               visible);
+               intel_set_plane_visible(crtc_state, plane_state, visible);
+       }
 }
 
 static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -15100,6 +15109,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
        /* HW state is read out, now we need to sanitize this mess. */
        get_encoder_power_domains(dev_priv);
 
+       intel_sanitize_plane_mapping(dev_priv);
+
        for_each_intel_encoder(dev, encoder) {
                intel_sanitize_encoder(encoder);
        }
index 6c7f8bc..5d77f75 100644 (file)
@@ -862,6 +862,7 @@ struct intel_plane {
                             const struct intel_plane_state *plane_state);
        void (*disable_plane)(struct intel_plane *plane,
                              struct intel_crtc *crtc);
+       bool (*get_hw_state)(struct intel_plane *plane);
        int (*check_plane)(struct intel_plane *plane,
                           struct intel_crtc_state *crtc_state,
                           struct intel_plane_state *state);
@@ -1924,6 +1925,7 @@ void skl_update_plane(struct intel_plane *plane,
                      const struct intel_crtc_state *crtc_state,
                      const struct intel_plane_state *plane_state);
 void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
+bool skl_plane_get_hw_state(struct intel_plane *plane);
 
 /* intel_tv.c */
 void intel_tv_init(struct drm_i915_private *dev_priv);
index 4fcf80c..4a8a5d9 100644 (file)
@@ -329,6 +329,26 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
+bool
+skl_plane_get_hw_state(struct intel_plane *plane)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum intel_display_power_domain power_domain;
+       enum plane_id plane_id = plane->id;
+       enum pipe pipe = plane->pipe;
+       bool ret;
+
+       power_domain = POWER_DOMAIN_PIPE(pipe);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+               return false;
+
+       ret = I915_READ(PLANE_CTL(pipe, plane_id)) & PLANE_CTL_ENABLE;
+
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
+}
+
 static void
 chv_update_csc(struct intel_plane *plane, uint32_t format)
 {
@@ -506,6 +526,26 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
+static bool
+vlv_plane_get_hw_state(struct intel_plane *plane)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum intel_display_power_domain power_domain;
+       enum plane_id plane_id = plane->id;
+       enum pipe pipe = plane->pipe;
+       bool ret;
+
+       power_domain = POWER_DOMAIN_PIPE(pipe);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+               return false;
+
+       ret = I915_READ(SPCNTR(pipe, plane_id)) & SP_ENABLE;
+
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
+}
+
 static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
                          const struct intel_plane_state *plane_state)
 {
@@ -646,6 +686,25 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
+static bool
+ivb_plane_get_hw_state(struct intel_plane *plane)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum intel_display_power_domain power_domain;
+       enum pipe pipe = plane->pipe;
+       bool ret;
+
+       power_domain = POWER_DOMAIN_PIPE(pipe);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+               return false;
+
+       ret =  I915_READ(SPRCTL(pipe)) & SPRITE_ENABLE;
+
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
+}
+
 static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
                          const struct intel_plane_state *plane_state)
 {
@@ -777,6 +836,25 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
+static bool
+g4x_plane_get_hw_state(struct intel_plane *plane)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum intel_display_power_domain power_domain;
+       enum pipe pipe = plane->pipe;
+       bool ret;
+
+       power_domain = POWER_DOMAIN_PIPE(pipe);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+               return false;
+
+       ret = I915_READ(DVSCNTR(pipe)) & DVS_ENABLE;
+
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
+}
+
 static int
 intel_check_sprite_plane(struct intel_plane *plane,
                         struct intel_crtc_state *crtc_state,
@@ -1232,6 +1310,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 
                intel_plane->update_plane = skl_update_plane;
                intel_plane->disable_plane = skl_disable_plane;
+               intel_plane->get_hw_state = skl_plane_get_hw_state;
 
                plane_formats = skl_plane_formats;
                num_plane_formats = ARRAY_SIZE(skl_plane_formats);
@@ -1242,6 +1321,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 
                intel_plane->update_plane = skl_update_plane;
                intel_plane->disable_plane = skl_disable_plane;
+               intel_plane->get_hw_state = skl_plane_get_hw_state;
 
                plane_formats = skl_plane_formats;
                num_plane_formats = ARRAY_SIZE(skl_plane_formats);
@@ -1252,6 +1332,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 
                intel_plane->update_plane = vlv_update_plane;
                intel_plane->disable_plane = vlv_disable_plane;
+               intel_plane->get_hw_state = vlv_plane_get_hw_state;
 
                plane_formats = vlv_plane_formats;
                num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
@@ -1267,6 +1348,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 
                intel_plane->update_plane = ivb_update_plane;
                intel_plane->disable_plane = ivb_disable_plane;
+               intel_plane->get_hw_state = ivb_plane_get_hw_state;
 
                plane_formats = snb_plane_formats;
                num_plane_formats = ARRAY_SIZE(snb_plane_formats);
@@ -1277,6 +1359,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 
                intel_plane->update_plane = g4x_update_plane;
                intel_plane->disable_plane = g4x_disable_plane;
+               intel_plane->get_hw_state = g4x_plane_get_hw_state;
 
                modifiers = i9xx_plane_format_modifiers;
                if (IS_GEN6(dev_priv)) {
index 0760b93..baab933 100644 (file)
@@ -121,6 +121,7 @@ int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int mcp77_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
index 435ff86..ef68741 100644 (file)
@@ -1447,11 +1447,13 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
                                args.nv50.ro = 0;
                                args.nv50.kind = mem->kind;
                                args.nv50.comp = mem->comp;
+                               argc = sizeof(args.nv50);
                                break;
                        case NVIF_CLASS_MEM_GF100:
                                args.gf100.version = 0;
                                args.gf100.ro = 0;
                                args.gf100.kind = mem->kind;
+                               argc = sizeof(args.gf100);
                                break;
                        default:
                                WARN_ON(1);
@@ -1459,7 +1461,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
                        }
 
                        ret = nvif_object_map_handle(&mem->mem.object,
-                                                    &argc, argc,
+                                                    &args, argc,
                                                     &handle, &length);
                        if (ret != 1)
                                return ret ? ret : -EINVAL;
index 00eeaaf..08e77cd 100644 (file)
@@ -1251,7 +1251,7 @@ nvaa_chipset = {
        .i2c = g94_i2c_new,
        .imem = nv50_instmem_new,
        .mc = g98_mc_new,
-       .mmu = g84_mmu_new,
+       .mmu = mcp77_mmu_new,
        .mxm = nv50_mxm_new,
        .pci = g94_pci_new,
        .therm = g84_therm_new,
@@ -1283,7 +1283,7 @@ nvac_chipset = {
        .i2c = g94_i2c_new,
        .imem = nv50_instmem_new,
        .mc = g98_mc_new,
-       .mmu = g84_mmu_new,
+       .mmu = mcp77_mmu_new,
        .mxm = nv50_mxm_new,
        .pci = g94_pci_new,
        .therm = g84_therm_new,
index 9646ade..243f0a5 100644 (file)
@@ -73,7 +73,8 @@ static int
 nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend)
 {
        struct nvkm_bar *bar = nvkm_bar(subdev);
-       bar->func->bar1.fini(bar);
+       if (bar->func->bar1.fini)
+               bar->func->bar1.fini(bar);
        return 0;
 }
 
index b10077d..35878fb 100644 (file)
@@ -26,7 +26,6 @@ gk20a_bar_func = {
        .dtor = gf100_bar_dtor,
        .oneinit = gf100_bar_oneinit,
        .bar1.init = gf100_bar_bar1_init,
-       .bar1.fini = gf100_bar_bar1_fini,
        .bar1.wait = gf100_bar_bar1_wait,
        .bar1.vmm = gf100_bar_bar1_vmm,
        .flush = g84_bar_flush,
index 352a65f..67ee983 100644 (file)
@@ -4,6 +4,7 @@ nvkm-y += nvkm/subdev/mmu/nv41.o
 nvkm-y += nvkm/subdev/mmu/nv44.o
 nvkm-y += nvkm/subdev/mmu/nv50.o
 nvkm-y += nvkm/subdev/mmu/g84.o
+nvkm-y += nvkm/subdev/mmu/mcp77.o
 nvkm-y += nvkm/subdev/mmu/gf100.o
 nvkm-y += nvkm/subdev/mmu/gk104.o
 nvkm-y += nvkm/subdev/mmu/gk20a.o
@@ -22,6 +23,7 @@ nvkm-y += nvkm/subdev/mmu/vmmnv04.o
 nvkm-y += nvkm/subdev/mmu/vmmnv41.o
 nvkm-y += nvkm/subdev/mmu/vmmnv44.o
 nvkm-y += nvkm/subdev/mmu/vmmnv50.o
+nvkm-y += nvkm/subdev/mmu/vmmmcp77.o
 nvkm-y += nvkm/subdev/mmu/vmmgf100.o
 nvkm-y += nvkm/subdev/mmu/vmmgk104.o
 nvkm-y += nvkm/subdev/mmu/vmmgk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
new file mode 100644 (file)
index 0000000..0527b50
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+mcp77_mmu = {
+       .dma_bits = 40,
+       .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
+       .mem = {{ -1,  0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
+       .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, mcp77_vmm_new, false, 0x0200 },
+       .kind = nv50_mmu_kind,
+       .kind_sys = true,
+};
+
+int
+mcp77_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+       return nvkm_mmu_new_(&mcp77_mmu, device, index, pmmu);
+}
index 6d8f61e..da06e64 100644 (file)
@@ -95,6 +95,9 @@ struct nvkm_vmm_desc {
        const struct nvkm_vmm_desc_func *func;
 };
 
+extern const struct nvkm_vmm_desc nv50_vmm_desc_12[];
+extern const struct nvkm_vmm_desc nv50_vmm_desc_16[];
+
 extern const struct nvkm_vmm_desc gk104_vmm_desc_16_12[];
 extern const struct nvkm_vmm_desc gk104_vmm_desc_16_16[];
 extern const struct nvkm_vmm_desc gk104_vmm_desc_17_12[];
@@ -169,6 +172,11 @@ int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
                  const char *, struct nvkm_vmm **);
 int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
 
+int nv50_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+void nv50_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
+int nv50_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+void nv50_vmm_flush(struct nvkm_vmm *, int);
+
 int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
                   struct nvkm_mmu *, u64, u64, void *, u32,
                   struct lock_class_key *, const char *, struct nvkm_vmm **);
@@ -200,6 +208,8 @@ int nv44_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
                 struct lock_class_key *, const char *, struct nvkm_vmm **);
 int nv50_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
                 struct lock_class_key *, const char *, struct nvkm_vmm **);
+int mcp77_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+                 struct lock_class_key *, const char *, struct nvkm_vmm **);
 int g84_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
                struct lock_class_key *, const char *, struct nvkm_vmm **);
 int gf100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c
new file mode 100644 (file)
index 0000000..e63d984
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+static const struct nvkm_vmm_func
+mcp77_vmm = {
+       .join = nv50_vmm_join,
+       .part = nv50_vmm_part,
+       .valid = nv50_vmm_valid,
+       .flush = nv50_vmm_flush,
+       .page_block = 1 << 29,
+       .page = {
+               { 16, &nv50_vmm_desc_16[0], NVKM_VMM_PAGE_xVxx },
+               { 12, &nv50_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx },
+               {}
+       }
+};
+
+int
+mcp77_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+             struct lock_class_key *key, const char *name,
+             struct nvkm_vmm **pvmm)
+{
+       return nv04_vmm_new_(&mcp77_vmm, mmu, 0, addr, size,
+                            argv, argc, key, name, pvmm);
+}
index 863a2ed..64f75d9 100644 (file)
@@ -32,7 +32,7 @@ static inline void
 nv50_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
 {
-       u64 next = addr | map->type, data;
+       u64 next = addr + map->type, data;
        u32 pten;
        int log2blk;
 
@@ -69,7 +69,7 @@ nv50_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
                nvkm_kmap(pt->memory);
                while (ptes--) {
-                       const u64 data = *map->dma++ | map->type;
+                       const u64 data = *map->dma++ + map->type;
                        VMM_WO064(pt, vmm, ptei++ * 8, data);
                        map->type += map->ctag;
                }
@@ -163,21 +163,21 @@ nv50_vmm_pgd = {
        .pde = nv50_vmm_pgd_pde,
 };
 
-static const struct nvkm_vmm_desc
+const struct nvkm_vmm_desc
 nv50_vmm_desc_12[] = {
        { PGT, 17, 8, 0x1000, &nv50_vmm_pgt },
        { PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
        {}
 };
 
-static const struct nvkm_vmm_desc
+const struct nvkm_vmm_desc
 nv50_vmm_desc_16[] = {
        { PGT, 13, 8, 0x1000, &nv50_vmm_pgt },
        { PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
        {}
 };
 
-static void
+void
 nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
 {
        struct nvkm_subdev *subdev = &vmm->mmu->subdev;
@@ -223,7 +223,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
        mutex_unlock(&subdev->mutex);
 }
 
-static int
+int
 nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
               struct nvkm_vmm_map *map)
 {
@@ -321,7 +321,7 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
        return 0;
 }
 
-static void
+void
 nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
 {
        struct nvkm_vmm_join *join;
@@ -335,7 +335,7 @@ nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
        }
 }
 
-static int
+int
 nv50_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
 {
        const u32 pd_offset = vmm->mmu->func->vmm.pd_offset;
index dc332ea..3ecffa5 100644 (file)
@@ -102,10 +102,13 @@ static int sun4i_tmds_determine_rate(struct clk_hw *hw,
                                        goto out;
                                }
 
-                               if (abs(rate - rounded / i) <
-                                   abs(rate - best_parent / best_div)) {
+                               if (!best_parent ||
+                                   abs(rate - rounded / i / j) <
+                                   abs(rate - best_parent / best_half /
+                                       best_div)) {
                                        best_parent = rounded;
-                                       best_div = i;
+                                       best_half = i;
+                                       best_div = j;
                                }
                        }
                }
index 641294a..fcd5814 100644 (file)
@@ -1863,7 +1863,7 @@ u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
  */
 int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       return -ENOSYS;
+       return -EINVAL;
 }
 
 /**
index b8a0980..3824595 100644 (file)
@@ -266,8 +266,8 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
        .set_property = vmw_du_connector_set_property,
        .destroy = vmw_ldu_connector_destroy,
        .reset = vmw_du_connector_reset,
-       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+       .atomic_duplicate_state = vmw_du_connector_duplicate_state,
+       .atomic_destroy_state = vmw_du_connector_destroy_state,
        .atomic_set_property = vmw_du_connector_atomic_set_property,
        .atomic_get_property = vmw_du_connector_atomic_get_property,
 };
index bc5f602..63a4cd7 100644 (file)
@@ -420,8 +420,8 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
        .set_property = vmw_du_connector_set_property,
        .destroy = vmw_sou_connector_destroy,
        .reset = vmw_du_connector_reset,
-       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+       .atomic_duplicate_state = vmw_du_connector_duplicate_state,
+       .atomic_destroy_state = vmw_du_connector_destroy_state,
        .atomic_set_property = vmw_du_connector_atomic_set_property,
        .atomic_get_property = vmw_du_connector_atomic_get_property,
 };
index 706164b..f7829a7 100644 (file)
@@ -821,8 +821,12 @@ void i2c_unregister_device(struct i2c_client *client)
 {
        if (!client)
                return;
-       if (client->dev.of_node)
+
+       if (client->dev.of_node) {
                of_node_clear_flag(client->dev.of_node, OF_POPULATED);
+               of_node_put(client->dev.of_node);
+       }
+
        if (ACPI_COMPANION(&client->dev))
                acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev));
        device_unregister(&client->dev);
index 4bb9927..a1082c0 100644 (file)
@@ -397,16 +397,17 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
                                   the underlying bus driver */
                break;
        case I2C_SMBUS_I2C_BLOCK_DATA:
+               if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
+                       dev_err(&adapter->dev, "Invalid block %s size %d\n",
+                               read_write == I2C_SMBUS_READ ? "read" : "write",
+                               data->block[0]);
+                       return -EINVAL;
+               }
+
                if (read_write == I2C_SMBUS_READ) {
                        msg[1].len = data->block[0];
                } else {
                        msg[0].len = data->block[0] + 1;
-                       if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 1) {
-                               dev_err(&adapter->dev,
-                                       "Invalid block write size %d\n",
-                                       data->block[0]);
-                               return -EINVAL;
-                       }
                        for (i = 1; i <= data->block[0]; i++)
                                msgbuf0[i] = data->block[i];
                }
index 6c51d40..c37aea9 100644 (file)
@@ -178,12 +178,14 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
                         twl4030_vibra_suspend, twl4030_vibra_resume);
 
 static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
-                             struct device_node *node)
+                             struct device_node *parent)
 {
+       struct device_node *node;
+
        if (pdata && pdata->coexist)
                return true;
 
-       node = of_find_node_by_name(node, "codec");
+       node = of_get_child_by_name(parent, "codec");
        if (node) {
                of_node_put(node);
                return true;
index 5690eb7..15e0d35 100644 (file)
@@ -248,8 +248,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
        int vddvibr_uV = 0;
        int error;
 
-       of_node_get(twl6040_core_dev->of_node);
-       twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
+       twl6040_core_node = of_get_child_by_name(twl6040_core_dev->of_node,
                                                 "vibra");
        if (!twl6040_core_node) {
                dev_err(&pdev->dev, "parent of node is missing?\n");
index 579b899..dbe57da 100644 (file)
@@ -1250,29 +1250,32 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
        case SS4_PACKET_ID_MULTI:
                if (priv->flags & ALPS_BUTTONPAD) {
                        if (IS_SS4PLUS_DEV(priv->dev_id)) {
-                               f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
-                               f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+                               f->mt[2].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
+                               f->mt[3].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+                               no_data_x = SS4_PLUS_MFPACKET_NO_AX_BL;
                        } else {
                                f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
                                f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
+                               no_data_x = SS4_MFPACKET_NO_AX_BL;
                        }
+                       no_data_y = SS4_MFPACKET_NO_AY_BL;
 
                        f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
                        f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
-                       no_data_x = SS4_MFPACKET_NO_AX_BL;
-                       no_data_y = SS4_MFPACKET_NO_AY_BL;
                } else {
                        if (IS_SS4PLUS_DEV(priv->dev_id)) {
-                               f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
-                               f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+                               f->mt[2].x = SS4_PLUS_STD_MF_X_V2(p, 0);
+                               f->mt[3].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+                               no_data_x = SS4_PLUS_MFPACKET_NO_AX;
                        } else {
-                               f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
-                               f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+                               f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
+                               f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
+                               no_data_x = SS4_MFPACKET_NO_AX;
                        }
+                       no_data_y = SS4_MFPACKET_NO_AY;
+
                        f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
                        f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
-                       no_data_x = SS4_MFPACKET_NO_AX;
-                       no_data_y = SS4_MFPACKET_NO_AY;
                }
 
                f->first_mp = 0;
index c80a7c7..79b6d69 100644 (file)
@@ -141,10 +141,12 @@ enum SS4_PACKET_ID {
 #define SS4_TS_Z_V2(_b)                (s8)(_b[4] & 0x7F)
 
 
-#define SS4_MFPACKET_NO_AX     8160    /* X-Coordinate value */
-#define SS4_MFPACKET_NO_AY     4080    /* Y-Coordinate value */
-#define SS4_MFPACKET_NO_AX_BL  8176    /* Buttonless X-Coordinate value */
-#define SS4_MFPACKET_NO_AY_BL  4088    /* Buttonless Y-Coordinate value */
+#define SS4_MFPACKET_NO_AX             8160    /* X-Coordinate value */
+#define SS4_MFPACKET_NO_AY             4080    /* Y-Coordinate value */
+#define SS4_MFPACKET_NO_AX_BL          8176    /* Buttonless X-Coord value */
+#define SS4_MFPACKET_NO_AY_BL          4088    /* Buttonless Y-Coord value */
+#define SS4_PLUS_MFPACKET_NO_AX                4080    /* SS4 PLUS, X */
+#define SS4_PLUS_MFPACKET_NO_AX_BL     4088    /* Buttonless SS4 PLUS, X */
 
 /*
  * enum V7_PACKET_ID - defines the packet type for V7
index ee5466a..cd9f61c 100644 (file)
@@ -173,6 +173,7 @@ static const char * const smbus_pnp_ids[] = {
        "LEN0046", /* X250 */
        "LEN004a", /* W541 */
        "LEN200f", /* T450s */
+       "LEN2018", /* T460p */
        NULL
 };
 
index 4f2bb59..141ea22 100644 (file)
@@ -230,8 +230,10 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
                rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
                        "Failed to process interrupt request: %d\n", ret);
 
-       if (count)
+       if (count) {
                kfree(attn_data.data);
+               attn_data.data = NULL;
+       }
 
        if (!kfifo_is_empty(&drvdata->attn_fifo))
                return rmi_irq_fn(irq, dev_id);
index 7ed828a..3486d94 100644 (file)
@@ -126,7 +126,7 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
        int data, n, ret;
        if (!np)
                return -ENODEV;
-       np = of_find_node_by_name(np, "touch");
+       np = of_get_child_by_name(np, "touch");
        if (!np) {
                dev_err(&pdev->dev, "Can't find touch node\n");
                return -EINVAL;
@@ -144,13 +144,13 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
        if (data) {
                ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data);
                if (ret < 0)
-                       return -EINVAL;
+                       goto err_put_node;
        }
        /* set tsi prebias time */
        if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) {
                ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data);
                if (ret < 0)
-                       return -EINVAL;
+                       goto err_put_node;
        }
        /* set prebias & prechg time of pen detect */
        data = 0;
@@ -161,10 +161,18 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
        if (data) {
                ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data);
                if (ret < 0)
-                       return -EINVAL;
+                       goto err_put_node;
        }
        of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x);
+
+       of_node_put(np);
+
        return 0;
+
+err_put_node:
+       of_node_put(np);
+
+       return -EINVAL;
 }
 #else
 #define pm860x_touch_dt_init(x, y, z)  (-1)
index 8d7f9c8..9642f10 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/input.h>
 #include <linux/input/mt.h>
 #include <linux/input/touchscreen.h>
+#include <linux/module.h>
 
 static bool touchscreen_get_prop_u32(struct device *dev,
                                     const char *property,
@@ -185,3 +186,6 @@ void touchscreen_report_pos(struct input_dev *input,
        input_report_abs(input, multitouch ? ABS_MT_POSITION_Y : ABS_Y, y);
 }
 EXPORT_SYMBOL(touchscreen_report_pos);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Device-tree helper functions for touchscreen devices");
index 9fc12f5..554d603 100644 (file)
@@ -1954,10 +1954,15 @@ static int crypt_setkey(struct crypt_config *cc)
        /* Ignore extra keys (which are used for IV etc) */
        subkey_size = crypt_subkey_size(cc);
 
-       if (crypt_integrity_hmac(cc))
+       if (crypt_integrity_hmac(cc)) {
+               if (subkey_size < cc->key_mac_size)
+                       return -EINVAL;
+
                crypt_copy_authenckey(cc->authenc_key, cc->key,
                                      subkey_size - cc->key_mac_size,
                                      cc->key_mac_size);
+       }
+
        for (i = 0; i < cc->tfms_count; i++) {
                if (crypt_integrity_hmac(cc))
                        r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
@@ -2053,9 +2058,6 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
 
        ret = crypt_setkey(cc);
 
-       /* wipe the kernel key payload copy in each case */
-       memset(cc->key, 0, cc->key_size * sizeof(u8));
-
        if (!ret) {
                set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
                kzfree(cc->key_string);
@@ -2523,6 +2525,10 @@ static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
                }
        }
 
+       /* wipe the kernel key payload copy */
+       if (cc->key_string)
+               memset(cc->key, 0, cc->key_size * sizeof(u8));
+
        return ret;
 }
 
@@ -2740,6 +2746,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                        cc->tag_pool_max_sectors * cc->on_disk_tag_size);
                if (!cc->tag_pool) {
                        ti->error = "Cannot allocate integrity tags mempool";
+                       ret = -ENOMEM;
                        goto bad;
                }
 
@@ -2961,6 +2968,9 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
                                return ret;
                        if (cc->iv_gen_ops && cc->iv_gen_ops->init)
                                ret = cc->iv_gen_ops->init(cc);
+                       /* wipe the kernel key payload copy */
+                       if (cc->key_string)
+                               memset(cc->key, 0, cc->key_size * sizeof(u8));
                        return ret;
                }
                if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
@@ -3007,7 +3017,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type crypt_target = {
        .name   = "crypt",
-       .version = {1, 18, 0},
+       .version = {1, 18, 1},
        .module = THIS_MODULE,
        .ctr    = crypt_ctr,
        .dtr    = crypt_dtr,
index 05c7bfd..46d7c87 100644 (file)
@@ -2559,7 +2559,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
        int r = 0;
        unsigned i;
        __u64 journal_pages, journal_desc_size, journal_tree_size;
-       unsigned char *crypt_data = NULL;
+       unsigned char *crypt_data = NULL, *crypt_iv = NULL;
+       struct skcipher_request *req = NULL;
 
        ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
        ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
@@ -2617,9 +2618,20 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
 
                if (blocksize == 1) {
                        struct scatterlist *sg;
-                       SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
-                       unsigned char iv[ivsize];
-                       skcipher_request_set_tfm(req, ic->journal_crypt);
+
+                       req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+                       if (!req) {
+                               *error = "Could not allocate crypt request";
+                               r = -ENOMEM;
+                               goto bad;
+                       }
+
+                       crypt_iv = kmalloc(ivsize, GFP_KERNEL);
+                       if (!crypt_iv) {
+                               *error = "Could not allocate iv";
+                               r = -ENOMEM;
+                               goto bad;
+                       }
 
                        ic->journal_xor = dm_integrity_alloc_page_list(ic);
                        if (!ic->journal_xor) {
@@ -2641,9 +2653,9 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                                sg_set_buf(&sg[i], va, PAGE_SIZE);
                        }
                        sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
-                       memset(iv, 0x00, ivsize);
+                       memset(crypt_iv, 0x00, ivsize);
 
-                       skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv);
+                       skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
                        init_completion(&comp.comp);
                        comp.in_flight = (atomic_t)ATOMIC_INIT(1);
                        if (do_crypt(true, req, &comp))
@@ -2659,10 +2671,22 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                        crypto_free_skcipher(ic->journal_crypt);
                        ic->journal_crypt = NULL;
                } else {
-                       SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
-                       unsigned char iv[ivsize];
                        unsigned crypt_len = roundup(ivsize, blocksize);
 
+                       req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+                       if (!req) {
+                               *error = "Could not allocate crypt request";
+                               r = -ENOMEM;
+                               goto bad;
+                       }
+
+                       crypt_iv = kmalloc(ivsize, GFP_KERNEL);
+                       if (!crypt_iv) {
+                               *error = "Could not allocate iv";
+                               r = -ENOMEM;
+                               goto bad;
+                       }
+
                        crypt_data = kmalloc(crypt_len, GFP_KERNEL);
                        if (!crypt_data) {
                                *error = "Unable to allocate crypt data";
@@ -2670,8 +2694,6 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                                goto bad;
                        }
 
-                       skcipher_request_set_tfm(req, ic->journal_crypt);
-
                        ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
                        if (!ic->journal_scatterlist) {
                                *error = "Unable to allocate sg list";
@@ -2695,12 +2717,12 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                                struct skcipher_request *section_req;
                                __u32 section_le = cpu_to_le32(i);
 
-                               memset(iv, 0x00, ivsize);
+                               memset(crypt_iv, 0x00, ivsize);
                                memset(crypt_data, 0x00, crypt_len);
                                memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
 
                                sg_init_one(&sg, crypt_data, crypt_len);
-                               skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv);
+                               skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
                                init_completion(&comp.comp);
                                comp.in_flight = (atomic_t)ATOMIC_INIT(1);
                                if (do_crypt(true, req, &comp))
@@ -2758,6 +2780,9 @@ retest_commit_id:
        }
 bad:
        kfree(crypt_data);
+       kfree(crypt_iv);
+       skcipher_request_free(req);
+
        return r;
 }
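
Both branches of create_journal() above replace SKCIPHER_REQUEST_ON_STACK plus a variable-length iv[ivsize] array with heap allocations that are released on the shared bad: label, avoiding a VLA and a potentially large crypto request on the stack. A kernel-style sketch of the allocate/use/free shape, with completion handling (do_crypt() in the driver) omitted and all names illustrative:

/* Sketch: heap-allocate the request and IV instead of placing them on
 * the stack, and free both on every exit path.
 */
static int crypt_one_buffer(struct crypto_skcipher *tfm,
                            struct scatterlist *sg, unsigned int len)
{
        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        struct skcipher_request *req;
        u8 *iv;
        int r;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        iv = kzalloc(ivsize, GFP_KERNEL);
        if (!iv) {
                r = -ENOMEM;
                goto out;
        }

        /* a real caller must handle -EINPROGRESS/-EBUSY from async tfms */
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
        skcipher_request_set_crypt(req, sg, sg, len, iv);
        r = crypto_skcipher_encrypt(req);
out:
        kfree(iv);
        skcipher_request_free(req);
        return r;
}
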
 
index d31d18d..36ef284 100644 (file)
 #define SECTOR_TO_BLOCK_SHIFT 3
 
 /*
+ * For btree insert:
  *  3 for btree insert +
  *  2 for btree lookup used within space map
+ * For btree remove:
+ *  2 for shadow spine +
+ *  4 for rebalancing 3 child nodes
  */
-#define THIN_MAX_CONCURRENT_LOCKS 5
+#define THIN_MAX_CONCURRENT_LOCKS 6
 
 /* This should be plenty */
 #define SPACE_MAP_ROOT_SIZE 128
index f21ce6a..58b3197 100644 (file)
@@ -683,23 +683,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
        pn->keys[1] = rn->keys[0];
        memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));
 
-       /*
-        * rejig the spine.  This is ugly, since it knows too
-        * much about the spine
-        */
-       if (s->nodes[0] != new_parent) {
-               unlock_block(s->info, s->nodes[0]);
-               s->nodes[0] = new_parent;
-       }
-       if (key < le64_to_cpu(rn->keys[0])) {
-               unlock_block(s->info, right);
-               s->nodes[1] = left;
-       } else {
-               unlock_block(s->info, left);
-               s->nodes[1] = right;
-       }
-       s->count = 2;
-
+       unlock_block(s->info, left);
+       unlock_block(s->info, right);
        return 0;
 }
 
index 85140c9..8b941f8 100644 (file)
@@ -687,6 +687,20 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
                return;
        }
 
+       /* For i.MX53 eSDHCv3, SYSCTL.SDCLKFS may not be set to 0. */
+       if (is_imx53_esdhc(imx_data)) {
+               /*
+                * According to the i.MX53 reference manual, if DLLCTRL[10] can
+                * be set, then the controller is eSDHCv3, else it is eSDHCv2.
+                */
+               val = readl(host->ioaddr + ESDHC_DLL_CTRL);
+               writel(val | BIT(10), host->ioaddr + ESDHC_DLL_CTRL);
+               temp = readl(host->ioaddr + ESDHC_DLL_CTRL);
+               writel(val, host->ioaddr + ESDHC_DLL_CTRL);
+               if (temp & BIT(10))
+                       pre_div = 2;
+       }
+
        temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
        temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
                | ESDHC_CLOCK_MASK);
index 7ccdc3e..53d6bb0 100644 (file)
@@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
        void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
        int err = 0;
        u8 *packet_ptr;
-       int i, n = 1, packet_len;
+       int packet_len;
        ptrdiff_t cmd_len;
 
        /* usb device unregistered? */
@@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
        }
 
        packet_ptr = cmd_head;
+       packet_len = cmd_len;
 
        /* firmware is not able to re-assemble 512 bytes buffer in full-speed */
-       if ((dev->udev->speed != USB_SPEED_HIGH) &&
-           (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) {
-               packet_len = PCAN_UFD_LOSPD_PKT_SIZE;
-               n += cmd_len / packet_len;
-       } else {
-               packet_len = cmd_len;
-       }
+       if (unlikely(dev->udev->speed != USB_SPEED_HIGH))
+               packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE);
 
-       for (i = 0; i < n; i++) {
+       do {
                err = usb_bulk_msg(dev->udev,
                                   usb_sndbulkpipe(dev->udev,
                                                   PCAN_USBPRO_EP_CMDOUT),
@@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
                }
 
                packet_ptr += packet_len;
-       }
+               cmd_len -= packet_len;
+
+               if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE)
+                       packet_len = cmd_len;
+
+       } while (packet_len > 0);
 
        return err;
 }
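
The rewritten loop above tracks how many command bytes remain and shrinks the final piece, instead of precomputing a packet count and sending fixed-size packets, which could run past the end of the command buffer. A standalone sketch of the same chunking logic in user-space C, with send_chunk() standing in for usb_bulk_msg():

#include <stdio.h>
#include <stddef.h>

#define LOSPD_PKT_SIZE 64  /* stands in for PCAN_UFD_LOSPD_PKT_SIZE */

static int send_chunk(const unsigned char *buf, size_t len)
{
    printf("sending %zu bytes\n", len);
    return 0;
}

static int send_cmd(const unsigned char *buf, size_t cmd_len, int full_speed)
{
    const unsigned char *p = buf;
    size_t packet_len = cmd_len;
    int err = 0;

    /* full-speed devices cannot reassemble one large buffer */
    if (full_speed && packet_len > LOSPD_PKT_SIZE)
        packet_len = LOSPD_PKT_SIZE;

    do {
        err = send_chunk(p, packet_len);
        if (err)
            break;

        p += packet_len;
        cmd_len -= packet_len;
        if (cmd_len < packet_len)
            packet_len = cmd_len;   /* last, possibly short, piece */
    } while (packet_len > 0);

    return err;
}

int main(void)
{
    unsigned char buf[200] = { 0 };

    send_cmd(buf, sizeof(buf), 1);  /* 64 + 64 + 64 + 8 */
    send_cmd(buf, 128, 1);          /* 64 + 64, no empty trailing packet */
    return 0;
}
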
index 7892f2f..2c2976a 100644 (file)
@@ -613,9 +613,11 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
-static void fs_timeout(struct net_device *dev)
+static void fs_timeout_work(struct work_struct *work)
 {
-       struct fs_enet_private *fep = netdev_priv(dev);
+       struct fs_enet_private *fep = container_of(work, struct fs_enet_private,
+                                                  timeout_work);
+       struct net_device *dev = fep->ndev;
        unsigned long flags;
        int wake = 0;
 
@@ -627,7 +629,6 @@ static void fs_timeout(struct net_device *dev)
                phy_stop(dev->phydev);
                (*fep->ops->stop)(dev);
                (*fep->ops->restart)(dev);
-               phy_start(dev->phydev);
        }
 
        phy_start(dev->phydev);
@@ -639,6 +640,13 @@ static void fs_timeout(struct net_device *dev)
                netif_wake_queue(dev);
 }
 
+static void fs_timeout(struct net_device *dev)
+{
+       struct fs_enet_private *fep = netdev_priv(dev);
+
+       schedule_work(&fep->timeout_work);
+}
+
 /*-----------------------------------------------------------------------------
  *  generic link-change handler - should be sufficient for most cases
  *-----------------------------------------------------------------------------*/
@@ -759,6 +767,7 @@ static int fs_enet_close(struct net_device *dev)
        netif_stop_queue(dev);
        netif_carrier_off(dev);
        napi_disable(&fep->napi);
+       cancel_work_sync(&fep->timeout_work);
        phy_stop(dev->phydev);
 
        spin_lock_irqsave(&fep->lock, flags);
@@ -1019,6 +1028,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
 
        ndev->netdev_ops = &fs_enet_netdev_ops;
        ndev->watchdog_timeo = 2 * HZ;
+       INIT_WORK(&fep->timeout_work, fs_timeout_work);
        netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight);
 
        ndev->ethtool_ops = &fs_ethtool_ops;
index 92e06b3..195fae6 100644 (file)
@@ -125,6 +125,7 @@ struct fs_enet_private {
        spinlock_t lock;        /* during all ops except TX pckt processing */
        spinlock_t tx_lock;     /* during fs_start_xmit and fs_tx         */
        struct fs_platform_info *fpi;
+       struct work_struct timeout_work;
        const struct fs_ops *ops;
        int rx_ring, tx_ring;
        dma_addr_t ring_mem_addr;
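
ndo_tx_timeout is invoked from the netdev watchdog timer, i.e. atomic context, so the hunks above move the stop/restart sequence into a work item: fs_timeout() only schedules it, fs_timeout_work() does the actual work, and fs_enet_close() cancels it before teardown. A kernel-style sketch of the same shape with generic names:

/* Sketch: defer a timeout handler that needs to sleep into process context. */
struct my_priv {
        struct net_device *ndev;
        struct work_struct timeout_work;
};

static void my_timeout_work(struct work_struct *work)
{
        struct my_priv *priv = container_of(work, struct my_priv, timeout_work);

        /* process context: sleeping calls are allowed here */
        netif_wake_queue(priv->ndev);
}

static void my_tx_timeout(struct net_device *dev)        /* .ndo_tx_timeout */
{
        struct my_priv *priv = netdev_priv(dev);

        schedule_work(&priv->timeout_work);               /* atomic-safe */
}

/* probe:  INIT_WORK(&priv->timeout_work, my_timeout_work);
 * close:  cancel_work_sync(&priv->timeout_work); before freeing priv
 */
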
index 4b3df17..ab2e191 100644 (file)
@@ -1276,6 +1276,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
        unsigned char *dst;
        u64 *handle_array;
        int index = 0;
+       u8 proto = 0;
        int ret = 0;
 
        if (adapter->resetting) {
@@ -1364,17 +1365,18 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
        }
 
        if (skb->protocol == htons(ETH_P_IP)) {
-               if (ip_hdr(skb)->version == 4)
-                       tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
-               else if (ip_hdr(skb)->version == 6)
-                       tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
-
-               if (ip_hdr(skb)->protocol == IPPROTO_TCP)
-                       tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
-               else if (ip_hdr(skb)->protocol != IPPROTO_TCP)
-                       tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
+               tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
+               proto = ip_hdr(skb)->protocol;
+       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
+               proto = ipv6_hdr(skb)->nexthdr;
        }
 
+       if (proto == IPPROTO_TCP)
+               tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
+       else if (proto == IPPROTO_UDP)
+               tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
+
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
                hdrs += 2;
@@ -3346,7 +3348,11 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
                return;
        }
 
+       adapter->ip_offload_ctrl.len =
+           cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
        adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
+       adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
+       adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
        adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
        adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
        adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
index 7f60522..a434fec 100644 (file)
@@ -2463,7 +2463,6 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
        return err;
 }
 
-#ifdef CONFIG_PM
 /**
  * fm10k_resume - Generic PM resume hook
  * @dev: generic device structure
@@ -2472,7 +2471,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
  * suspend or hibernation. This function does not need to handle lower PCIe
  * device state as the stack takes care of that for us.
  **/
-static int fm10k_resume(struct device *dev)
+static int __maybe_unused fm10k_resume(struct device *dev)
 {
        struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
        struct net_device *netdev = interface->netdev;
@@ -2499,7 +2498,7 @@ static int fm10k_resume(struct device *dev)
  * system suspend or hibernation. This function does not need to handle lower
  * PCIe device state as the stack takes care of that for us.
  **/
-static int fm10k_suspend(struct device *dev)
+static int __maybe_unused fm10k_suspend(struct device *dev)
 {
        struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
        struct net_device *netdev = interface->netdev;
@@ -2511,8 +2510,6 @@ static int fm10k_suspend(struct device *dev)
        return 0;
 }
 
-#endif /* CONFIG_PM */
-
 /**
  * fm10k_io_error_detected - called when PCI error is detected
  * @pdev: Pointer to PCI device
@@ -2643,11 +2640,9 @@ static struct pci_driver fm10k_driver = {
        .id_table               = fm10k_pci_tbl,
        .probe                  = fm10k_probe,
        .remove                 = fm10k_remove,
-#ifdef CONFIG_PM
        .driver = {
                .pm             = &fm10k_pm_ops,
        },
-#endif /* CONFIG_PM */
        .sriov_configure        = fm10k_iov_configure,
        .err_handler            = &fm10k_err_handler
 };
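
Dropping the #ifdef CONFIG_PM blocks and tagging the hooks __maybe_unused keeps them compiled in every configuration (so they cannot bit-rot) while suppressing the unused-function warning when the PM macros do not reference them. A minimal sketch of the pattern; fm10k's own fm10k_pm_ops definition is not shown in the hunk, so SIMPLE_DEV_PM_OPS is used here for brevity and the driver name is hypothetical:

/* Sketch: always compile the PM callbacks and let the pm_ops macro decide
 * whether they are referenced.  __maybe_unused silences the "defined but
 * not used" warning in !CONFIG_PM_SLEEP builds.
 */
static int __maybe_unused mydrv_suspend(struct device *dev)
{
        return 0;
}

static int __maybe_unused mydrv_resume(struct device *dev)
{
        return 0;
}

static SIMPLE_DEV_PM_OPS(mydrv_pm_ops, mydrv_suspend, mydrv_resume);

/* other pci_driver fields omitted */
static struct pci_driver mydrv_driver = {
        .driver = {
                .pm = &mydrv_pm_ops,   /* no #ifdef CONFIG_PM needed */
        },
};
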
index 434b392..6c0391c 100644 (file)
@@ -821,13 +821,18 @@ static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
        struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
        int err;
 
-       err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
-       if (err)
-               return err;
        fib->lpm_tree = new_tree;
        mlxsw_sp_lpm_tree_hold(new_tree);
+       err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
+       if (err)
+               goto err_tree_bind;
        mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
        return 0;
+
+err_tree_bind:
+       mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
+       fib->lpm_tree = old_tree;
+       return err;
 }
 
 static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
@@ -868,11 +873,14 @@ err_tree_replace:
        return err;
 
 no_replace:
-       err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
-       if (err)
-               return err;
        fib->lpm_tree = new_tree;
        mlxsw_sp_lpm_tree_hold(new_tree);
+       err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
+       if (err) {
+               mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
+               fib->lpm_tree = NULL;
+               return err;
+       }
        return 0;
 }
 
index ed58c74..f5a7eb2 100644 (file)
@@ -715,7 +715,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
                /* warning!!!! We are retrieving the virtual ptr in the sw_data
                 * field as a 32bit value. Will not work on 64bit machines
                 */
-               page = (struct page *)GET_SW_DATA0(desc);
+               page = (struct page *)GET_SW_DATA0(ndesc);
 
                if (likely(dma_buff && buf_len && page)) {
                        dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
index 4f4a842..a8ec589 100644 (file)
@@ -611,6 +611,14 @@ static void tun_queue_purge(struct tun_file *tfile)
        skb_queue_purge(&tfile->sk.sk_error_queue);
 }
 
+static void tun_cleanup_tx_array(struct tun_file *tfile)
+{
+       if (tfile->tx_array.ring.queue) {
+               skb_array_cleanup(&tfile->tx_array);
+               memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
+       }
+}
+
 static void __tun_detach(struct tun_file *tfile, bool clean)
 {
        struct tun_file *ntfile;
@@ -657,8 +665,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
                            tun->dev->reg_state == NETREG_REGISTERED)
                                unregister_netdevice(tun->dev);
                }
-               if (tun)
-                       skb_array_cleanup(&tfile->tx_array);
+               tun_cleanup_tx_array(tfile);
                sock_put(&tfile->sk);
        }
 }
@@ -700,11 +707,13 @@ static void tun_detach_all(struct net_device *dev)
                /* Drop read queue */
                tun_queue_purge(tfile);
                sock_put(&tfile->sk);
+               tun_cleanup_tx_array(tfile);
        }
        list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
                tun_enable_queue(tfile);
                tun_queue_purge(tfile);
                sock_put(&tfile->sk);
+               tun_cleanup_tx_array(tfile);
        }
        BUG_ON(tun->numdisabled != 0);
 
@@ -2851,6 +2860,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
 
        sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
 
+       memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
+
        return 0;
 }
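
The tun hunks above zero tfile->tx_array at open, free it only when its ring was actually allocated, and re-zero it afterwards, so the cleanup helper is safe to call from both detach paths without double-freeing a queue that was never set up. A standalone sketch of that idempotent-cleanup idea in user-space C, with the ring structure reduced to stand-in fields:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ring {
    void **queue;       /* NULL means "never allocated" */
    size_t size;
};

struct file_ctx {
    struct ring tx_ring;
};

static void ctx_open(struct file_ctx *ctx)
{
    /* start from a known-zero state so cleanup is always safe */
    memset(&ctx->tx_ring, 0, sizeof(ctx->tx_ring));
}

static void ctx_cleanup_ring(struct file_ctx *ctx)
{
    if (ctx->tx_ring.queue) {                            /* only if set up */
        free(ctx->tx_ring.queue);
        memset(&ctx->tx_ring, 0, sizeof(ctx->tx_ring));  /* idempotent */
    }
}

int main(void)
{
    struct file_ctx ctx;

    ctx_open(&ctx);
    ctx_cleanup_ring(&ctx);   /* safe even though nothing was allocated */

    ctx.tx_ring.queue = calloc(16, sizeof(void *));
    ctx.tx_ring.size = 16;
    ctx_cleanup_ring(&ctx);   /* frees once ... */
    ctx_cleanup_ring(&ctx);   /* ... and is a no-op the second time */
    printf("done\n");
    return 0;
}
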
 
index d51d9ab..0657203 100644 (file)
@@ -606,6 +606,7 @@ enum rtl8152_flags {
        PHY_RESET,
        SCHEDULE_NAPI,
        GREEN_ETHERNET,
+       DELL_TB_RX_AGG_BUG,
 };
 
 /* Define these values to match your device */
@@ -1798,6 +1799,9 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
                dev_kfree_skb_any(skb);
 
                remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head);
+
+               if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags))
+                       break;
        }
 
        if (!skb_queue_empty(&skb_head)) {
@@ -4133,6 +4137,9 @@ static void r8153_init(struct r8152 *tp)
        /* rx aggregation */
        ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
        ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
+       if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags))
+               ocp_data |= RX_AGG_DISABLE;
+
        ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
 
        rtl_tally_reset(tp);
@@ -5207,6 +5214,12 @@ static int rtl8152_probe(struct usb_interface *intf,
                netdev->hw_features &= ~NETIF_F_RXCSUM;
        }
 
+       if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 &&
+           udev->serial && !strcmp(udev->serial, "000001000000")) {
+               dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
+               set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
+       }
+
        netdev->ethtool_ops = &ops;
        netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
 
index 6a59d06..9be0b05 100644 (file)
@@ -182,12 +182,9 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
 
        err = request_firmware(&clm, clm_name, dev);
        if (err) {
-               if (err == -ENOENT) {
-                       brcmf_dbg(INFO, "continue with CLM data currently present in firmware\n");
-                       return 0;
-               }
-               brcmf_err("request CLM blob file failed (%d)\n", err);
-               return err;
+               brcmf_info("no clm_blob available(err=%d), device may have limited channels available\n",
+                          err);
+               return 0;
        }
 
        chunk_buf = kzalloc(sizeof(*chunk_buf) + MAX_CHUNK_LEN - 1, GFP_KERNEL);
index d53550e..4276ebf 100644 (file)
@@ -451,10 +451,13 @@ static void **nvme_pci_iod_list(struct request *req)
 static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       int nseg = blk_rq_nr_phys_segments(req);
        unsigned int avg_seg_size;
 
-       avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req),
-                       blk_rq_nr_phys_segments(req));
+       if (nseg == 0)
+               return false;
+
+       avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
 
        if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
                return false;
@@ -722,20 +725,19 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
 }
 
 static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
-               struct request *req, struct nvme_rw_command *cmd)
+               struct request *req, struct nvme_rw_command *cmd, int entries)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-       int length = blk_rq_payload_bytes(req);
        struct dma_pool *pool;
        struct nvme_sgl_desc *sg_list;
        struct scatterlist *sg = iod->sg;
-       int entries = iod->nents, i = 0;
        dma_addr_t sgl_dma;
+       int i = 0;
 
        /* setting the transfer type as SGL */
        cmd->flags = NVME_CMD_SGL_METABUF;
 
-       if (length == sg_dma_len(sg)) {
+       if (entries == 1) {
                nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
                return BLK_STS_OK;
        }
@@ -775,13 +777,9 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
                }
 
                nvme_pci_sgl_set_data(&sg_list[i++], sg);
-
-               length -= sg_dma_len(sg);
                sg = sg_next(sg);
-               entries--;
-       } while (length > 0);
+       } while (--entries > 0);
 
-       WARN_ON(entries > 0);
        return BLK_STS_OK;
 }
 
@@ -793,6 +791,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
        enum dma_data_direction dma_dir = rq_data_dir(req) ?
                        DMA_TO_DEVICE : DMA_FROM_DEVICE;
        blk_status_t ret = BLK_STS_IOERR;
+       int nr_mapped;
 
        sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
        iod->nents = blk_rq_map_sg(q, req, iod->sg);
@@ -800,12 +799,13 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
                goto out;
 
        ret = BLK_STS_RESOURCE;
-       if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
-                               DMA_ATTR_NO_WARN))
+       nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
+                       DMA_ATTR_NO_WARN);
+       if (!nr_mapped)
                goto out;
 
        if (iod->use_sgl)
-               ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
+               ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
        else
                ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
 
index b4964b0..8f6e8e2 100644 (file)
@@ -410,6 +410,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index)
        if (ret)
                return ERR_PTR(-ENODEV);
 
+       /* This phy type is handled by the usb-phy subsystem for now */
+       if (of_device_is_compatible(args.np, "usb-nop-xceiv"))
+               return ERR_PTR(-ENODEV);
+
        mutex_lock(&phy_provider_mutex);
        phy_provider = of_phy_provider_lookup(args.np);
        if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
index 58476b7..c940685 100644 (file)
@@ -486,15 +486,28 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
 
 int sas_eh_abort_handler(struct scsi_cmnd *cmd)
 {
-       int res;
+       int res = TMF_RESP_FUNC_FAILED;
        struct sas_task *task = TO_SAS_TASK(cmd);
        struct Scsi_Host *host = cmd->device->host;
+       struct domain_device *dev = cmd_to_domain_dev(cmd);
        struct sas_internal *i = to_sas_internal(host->transportt);
+       unsigned long flags;
 
        if (!i->dft->lldd_abort_task)
                return FAILED;
 
-       res = i->dft->lldd_abort_task(task);
+       spin_lock_irqsave(host->host_lock, flags);
+       /* We cannot do async aborts for SATA devices */
+       if (dev_is_sata(dev) && !host->host_eh_scheduled) {
+               spin_unlock_irqrestore(host->host_lock, flags);
+               return FAILED;
+       }
+       spin_unlock_irqrestore(host->host_lock, flags);
+
+       if (task)
+               res = i->dft->lldd_abort_task(task);
+       else
+               SAS_DPRINTK("no task to abort\n");
        if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
                return SUCCESS;
 
index d8e4219..71c7376 100644 (file)
@@ -32,7 +32,7 @@ config SSB_BLOCKIO
 
 config SSB_PCIHOST_POSSIBLE
        bool
-       depends on SSB && (PCI = y || PCI = SSB)
+       depends on SSB && (PCI = y || PCI = SSB) && PCI_DRIVERS_LEGACY
        default y
 
 config SSB_PCIHOST
index 79375fc..d67a72d 100644 (file)
@@ -430,8 +430,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                 * safe because the task has stopped executing permanently.
                 */
                if (permitted && (task->flags & PF_DUMPCORE)) {
-                       eip = KSTK_EIP(task);
-                       esp = KSTK_ESP(task);
+                       if (try_get_task_stack(task)) {
+                               eip = KSTK_EIP(task);
+                               esp = KSTK_ESP(task);
+                               put_task_stack(task);
+                       }
                }
        }
 
index 2272ded..631354a 100644 (file)
 /* Mark a function definition as prohibited from being cloned. */
 #define __noclone      __attribute__((__noclone__, __optimize__("no-tracer")))
 
-#ifdef RANDSTRUCT_PLUGIN
+#if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__)
 #define __randomize_layout __attribute__((randomize_layout))
 #define __no_randomize_layout __attribute__((no_randomize_layout))
 #endif
index 496e59a..8fb90a0 100644 (file)
@@ -932,6 +932,8 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_HYPERV_SYNIC2 148
 #define KVM_CAP_HYPERV_VP_INDEX 149
 #define KVM_CAP_S390_AIS_MIGRATION 150
+#define KVM_CAP_PPC_GET_CPU_CHAR 151
+#define KVM_CAP_S390_BPB 152
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1261,6 +1263,8 @@ struct kvm_s390_ucas_mapping {
 #define KVM_PPC_CONFIGURE_V3_MMU  _IOW(KVMIO,  0xaf, struct kvm_ppc_mmuv3_cfg)
 /* Available with KVM_CAP_PPC_RADIX_MMU */
 #define KVM_PPC_GET_RMMU_INFO    _IOW(KVMIO,  0xb0, struct kvm_ppc_rmmu_info)
+/* Available with KVM_CAP_PPC_GET_CPU_CHAR */
+#define KVM_PPC_GET_CPU_CHAR     _IOR(KVMIO,  0xb1, struct kvm_ppc_cpu_char)
 
 /* ioctl for vm fd */
 #define KVM_CREATE_DEVICE        _IOWR(KVMIO,  0xe0, struct kvm_create_device)
index 51ec2dd..7949e8b 100644 (file)
@@ -956,7 +956,7 @@ select_insn:
                DST = tmp;
                CONT;
        ALU_MOD_X:
-               if (unlikely(SRC == 0))
+               if (unlikely((u32)SRC == 0))
                        return 0;
                tmp = (u32) DST;
                DST = do_div(tmp, (u32) SRC);
@@ -975,7 +975,7 @@ select_insn:
                DST = div64_u64(DST, SRC);
                CONT;
        ALU_DIV_X:
-               if (unlikely(SRC == 0))
+               if (unlikely((u32)SRC == 0))
                        return 0;
                tmp = (u32) DST;
                do_div(tmp, (u32) SRC);
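
BPF_ALU (as opposed to BPF_ALU64) division and modulo operate on the low 32 bits of the source register, so a value such as 1ULL << 32 must count as a zero divisor; the hunks above make the interpreter's check use the same (u32) truncation as the do_div() that follows. A standalone user-space sketch of the difference:

#include <stdio.h>
#include <stdint.h>

/* 32-bit ALU divide as the interpreter performs it: both operands are
 * truncated to u32 before dividing.
 */
static uint64_t alu32_div(uint64_t dst, uint64_t src, int *ok)
{
    if ((uint32_t)src == 0) {   /* check the value actually divided by */
        *ok = 0;
        return 0;
    }
    *ok = 1;
    return (uint32_t)dst / (uint32_t)src;
}

int main(void)
{
    uint64_t src = 0x100000000ULL;   /* non-zero as u64, zero as u32 */
    int ok;

    alu32_div(100, src, &ok);
    printf("src != 0 as u64, but the 32-bit divide %s\n",
           ok ? "succeeded" : "was rejected as divide-by-zero");
    return 0;
}
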
index 20eb04f..13551e6 100644 (file)
@@ -978,6 +978,13 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
        return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno);
 }
 
+static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
+{
+       const struct bpf_reg_state *reg = cur_regs(env) + regno;
+
+       return reg->type == PTR_TO_CTX;
+}
+
 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
                                   const struct bpf_reg_state *reg,
                                   int off, int size, bool strict)
@@ -1258,6 +1265,12 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
                return -EACCES;
        }
 
+       if (is_ctx_reg(env, insn->dst_reg)) {
+               verbose(env, "BPF_XADD stores into R%d context is not allowed\n",
+                       insn->dst_reg);
+               return -EACCES;
+       }
+
        /* check whether atomic_add can read the memory */
        err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                               BPF_SIZE(insn->code), BPF_READ, -1);
@@ -1882,17 +1895,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 
        dst_reg = &regs[dst];
 
-       if (WARN_ON_ONCE(known && (smin_val != smax_val))) {
-               print_verifier_state(env, env->cur_state);
-               verbose(env,
-                       "verifier internal error: known but bad sbounds\n");
-               return -EINVAL;
-       }
-       if (WARN_ON_ONCE(known && (umin_val != umax_val))) {
-               print_verifier_state(env, env->cur_state);
-               verbose(env,
-                       "verifier internal error: known but bad ubounds\n");
-               return -EINVAL;
+       if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
+           smin_val > smax_val || umin_val > umax_val) {
+               /* Taint dst register if offset had invalid bounds derived from
+                * e.g. dead branches.
+                */
+               __mark_reg_unknown(dst_reg);
+               return 0;
        }
 
        if (BPF_CLASS(insn->code) != BPF_ALU64) {
@@ -2084,6 +2093,15 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        src_known = tnum_is_const(src_reg.var_off);
        dst_known = tnum_is_const(dst_reg->var_off);
 
+       if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
+           smin_val > smax_val || umin_val > umax_val) {
+               /* Taint dst register if offset had invalid bounds derived from
+                * e.g. dead branches.
+                */
+               __mark_reg_unknown(dst_reg);
+               return 0;
+       }
+
        if (!src_known &&
            opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
                __mark_reg_unknown(dst_reg);
@@ -3993,6 +4011,12 @@ static int do_check(struct bpf_verifier_env *env)
                        if (err)
                                return err;
 
+                       if (is_ctx_reg(env, insn->dst_reg)) {
+                               verbose(env, "BPF_ST stores into R%d context is not allowed\n",
+                                       insn->dst_reg);
+                               return -EACCES;
+                       }
+
                        /* check that memory (dst_reg + off) is writeable */
                        err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                                               BPF_SIZE(insn->code), BPF_WRITE,
@@ -4445,6 +4469,24 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
        int i, cnt, delta = 0;
 
        for (i = 0; i < insn_cnt; i++, insn++) {
+               if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
+                   insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
+                       /* due to JIT bugs clear upper 32-bits of src register
+                        * before div/mod operation
+                        */
+                       insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg);
+                       insn_buf[1] = *insn;
+                       cnt = 2;
+                       new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+                       if (!new_prog)
+                               return -ENOMEM;
+
+                       delta    += cnt - 1;
+                       env->prog = prog = new_prog;
+                       insn      = new_prog->insnsi + i + delta;
+                       continue;
+               }
+
                if (insn->code != (BPF_JMP | BPF_CALL))
                        continue;
 
index 2cf06c2..7e4c445 100644 (file)
@@ -4447,6 +4447,7 @@ static struct cftype cgroup_base_files[] = {
        },
        {
                .name = "cgroup.threads",
+               .flags = CFTYPE_NS_DELEGATABLE,
                .release = cgroup_procs_release,
                .seq_start = cgroup_threads_start,
                .seq_next = cgroup_procs_next,
index 0ba0dd8..5187dfe 100644 (file)
@@ -321,15 +321,23 @@ void irq_matrix_remove_reserved(struct irq_matrix *m)
 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
                     bool reserved, unsigned int *mapped_cpu)
 {
-       unsigned int cpu;
+       unsigned int cpu, best_cpu, maxavl = 0;
+       struct cpumap *cm;
+       unsigned int bit;
 
+       best_cpu = UINT_MAX;
        for_each_cpu(cpu, msk) {
-               struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
-               unsigned int bit;
+               cm = per_cpu_ptr(m->maps, cpu);
 
-               if (!cm->online)
+               if (!cm->online || cm->available <= maxavl)
                        continue;
 
+               best_cpu = cpu;
+               maxavl = cm->available;
+       }
+
+       if (maxavl) {
+               cm = per_cpu_ptr(m->maps, best_cpu);
                bit = matrix_alloc_area(m, cm, 1, false);
                if (bit < m->alloc_end) {
                        cm->allocated++;
@@ -338,8 +346,8 @@ int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
                        m->global_available--;
                        if (reserved)
                                m->global_reserved--;
-                       *mapped_cpu = cpu;
-                       trace_irq_matrix_alloc(bit, cpu, m, cm);
+                       *mapped_cpu = best_cpu;
+                       trace_irq_matrix_alloc(bit, best_cpu, m, cm);
                        return bit;
                }
        }
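
Instead of allocating from the first online CPU in the mask, the rewritten irq_matrix_alloc() above first scans the whole mask for the online CPU with the most available vectors and only then allocates, spreading interrupts more evenly. A standalone sketch of just the selection step, with made-up availability numbers:

#include <stdio.h>

#define NCPUS 4

struct cpumap {
    int online;
    int available;
};

/* return the online CPU with the largest 'available' count, or -1 */
static int pick_best_cpu(const struct cpumap *maps, int ncpus)
{
    int cpu, best_cpu = -1;
    int maxavl = 0;

    for (cpu = 0; cpu < ncpus; cpu++) {
        if (!maps[cpu].online || maps[cpu].available <= maxavl)
            continue;
        best_cpu = cpu;
        maxavl = maps[cpu].available;
    }
    return best_cpu;   /* caller allocates from this CPU's map */
}

int main(void)
{
    struct cpumap maps[NCPUS] = {
        { .online = 1, .available = 10 },
        { .online = 1, .available = 42 },
        { .online = 0, .available = 99 },   /* offline, must be skipped */
        { .online = 1, .available = 42 },
    };

    printf("best cpu: %d\n", pick_best_cpu(maps, NCPUS));  /* prints 1 */
    return 0;
}
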
index 0cddf60..5af2842 100644 (file)
@@ -2579,8 +2579,7 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
                bit = RB_CTX_NORMAL;
        else
                bit = pc & NMI_MASK ? RB_CTX_NMI :
-                       pc & HARDIRQ_MASK ? RB_CTX_IRQ :
-                       pc & SOFTIRQ_OFFSET ? 2 : RB_CTX_SOFTIRQ;
+                       pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
 
        if (unlikely(val & (1 << bit)))
                return 1;
index ec0f9aa..1b87157 100644 (file)
@@ -2213,6 +2213,7 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
 {
        struct trace_event_call *call, *p;
        const char *last_system = NULL;
+       bool first = false;
        int last_i;
        int i;
 
@@ -2220,15 +2221,28 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
        list_for_each_entry_safe(call, p, &ftrace_events, list) {
                /* events are usually grouped together with systems */
                if (!last_system || call->class->system != last_system) {
+                       first = true;
                        last_i = 0;
                        last_system = call->class->system;
                }
 
+               /*
+                * Since calls are grouped by systems, the likelihood that the
+                * next call in the iteration belongs to the same system as the
+                * previous call is high. As an optimization, we skip searching
+                * for a map[] that matches the call's system if the last call
+                * was from the same system. That's what last_i is for. If the
+                * call has the same system as the previous call, then last_i
+                * will be the index of the first map[] that has a matching
+                * system.
+                */
                for (i = last_i; i < len; i++) {
                        if (call->class->system == map[i]->system) {
                                /* Save the first system if need be */
-                               if (!last_i)
+                               if (first) {
                                        last_i = i;
+                                       first = false;
+                               }
                                update_event_printk(call, map[i]);
                        }
                }
index 43d18cb..f699122 100644 (file)
@@ -48,6 +48,7 @@
 #include <linux/moduleparam.h>
 #include <linux/uaccess.h>
 #include <linux/sched/isolation.h>
+#include <linux/nmi.h>
 
 #include "workqueue_internal.h"
 
@@ -4463,6 +4464,12 @@ void show_workqueue_state(void)
                        if (pwq->nr_active || !list_empty(&pwq->delayed_works))
                                show_pwq(pwq);
                        spin_unlock_irqrestore(&pwq->pool->lock, flags);
+                       /*
+                        * We could be printing a lot from atomic context, e.g.
+                        * sysrq-t -> show_workqueue_state(). Avoid triggering
+                        * hard lockup.
+                        */
+                       touch_nmi_watchdog();
                }
        }
 
@@ -4490,6 +4497,12 @@ void show_workqueue_state(void)
                pr_cont("\n");
        next_pool:
                spin_unlock_irqrestore(&pool->lock, flags);
+               /*
+                * We could be printing a lot from atomic context, e.g.
+                * sysrq-t -> show_workqueue_state(). Avoid triggering
+                * hard lockup.
+                */
+               touch_nmi_watchdog();
        }
 
        rcu_read_unlock_sched();
index ca5674c..7930046 100644 (file)
@@ -2857,8 +2857,11 @@ int do_swap_page(struct vm_fault *vmf)
        int ret = 0;
        bool vma_readahead = swap_use_vma_readahead();
 
-       if (vma_readahead)
+       if (vma_readahead) {
                page = swap_readahead_detect(vmf, &swap_ra);
+               swapcache = page;
+       }
+
        if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) {
                if (page)
                        put_page(page);
@@ -2889,9 +2892,12 @@ int do_swap_page(struct vm_fault *vmf)
 
 
        delayacct_set_flag(DELAYACCT_PF_SWAPIN);
-       if (!page)
+       if (!page) {
                page = lookup_swap_cache(entry, vma_readahead ? vma : NULL,
                                         vmf->address);
+               swapcache = page;
+       }
+
        if (!page) {
                struct swap_info_struct *si = swp_swap_info(entry);
 
index 8592543..270a821 100644 (file)
@@ -616,7 +616,6 @@ static void init_early_allocated_pages(void)
 {
        pg_data_t *pgdat;
 
-       drain_all_pages(NULL);
        for_each_online_pgdat(pgdat)
                init_zones_in_node(pgdat);
 }
index 003b2d6..4d7f988 100644 (file)
@@ -721,20 +721,16 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
 {
        struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
 
-       if (WARN_ONCE(dev->type != ARPHRD_CAN ||
-                     skb->len != CAN_MTU ||
-                     cfd->len > CAN_MAX_DLEN,
-                     "PF_CAN: dropped non conform CAN skbuf: "
-                     "dev type %d, len %d, datalen %d\n",
-                     dev->type, skb->len, cfd->len))
-               goto drop;
+       if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
+                    cfd->len > CAN_MAX_DLEN)) {
+               pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n",
+                            dev->type, skb->len, cfd->len);
+               kfree_skb(skb);
+               return NET_RX_DROP;
+       }
 
        can_receive(skb, dev);
        return NET_RX_SUCCESS;
-
-drop:
-       kfree_skb(skb);
-       return NET_RX_DROP;
 }
 
 static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -742,20 +738,16 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
 {
        struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
 
-       if (WARN_ONCE(dev->type != ARPHRD_CAN ||
-                     skb->len != CANFD_MTU ||
-                     cfd->len > CANFD_MAX_DLEN,
-                     "PF_CAN: dropped non conform CAN FD skbuf: "
-                     "dev type %d, len %d, datalen %d\n",
-                     dev->type, skb->len, cfd->len))
-               goto drop;
+       if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
+                    cfd->len > CANFD_MAX_DLEN)) {
+               pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n",
+                            dev->type, skb->len, cfd->len);
+               kfree_skb(skb);
+               return NET_RX_DROP;
+       }
 
        can_receive(skb, dev);
        return NET_RX_SUCCESS;
-
-drop:
-       kfree_skb(skb);
-       return NET_RX_DROP;
 }
 
 /*
index d339ef1..1c0eb43 100644 (file)
@@ -458,6 +458,10 @@ do_pass:
                            convert_bpf_extensions(fp, &insn))
                                break;
 
+                       if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
+                           fp->code == (BPF_ALU | BPF_MOD | BPF_X))
+                               *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
+
                        *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
                        break;
 
index 15ce300..544bddf 100644 (file)
@@ -976,8 +976,8 @@ ip_proto_again:
 out_good:
        ret = true;
 
-       key_control->thoff = (u16)nhoff;
 out:
+       key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
        key_basic->n_proto = proto;
        key_basic->ip_proto = ip_proto;
 
@@ -985,7 +985,6 @@ out:
 
 out_bad:
        ret = false;
-       key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
        goto out;
 }
 EXPORT_SYMBOL(__skb_flow_dissect);
index 9dcc392..217683d 100644 (file)
@@ -1226,8 +1226,14 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
                }
 
                if (!rcu_access_pointer(fn->leaf)) {
-                       atomic_inc(&rt->rt6i_ref);
-                       rcu_assign_pointer(fn->leaf, rt);
+                       if (fn->fn_flags & RTN_TL_ROOT) {
+                               /* put back null_entry for root node */
+                               rcu_assign_pointer(fn->leaf,
+                                           info->nl_net->ipv6.ip6_null_entry);
+                       } else {
+                               atomic_inc(&rt->rt6i_ref);
+                               rcu_assign_pointer(fn->leaf, rt);
+                       }
                }
                fn = sn;
        }
index 7726959..8735492 100644 (file)
@@ -337,11 +337,12 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
 
        nt->dev = dev;
        nt->net = dev_net(dev);
-       ip6gre_tnl_link_config(nt, 1);
 
        if (register_netdevice(dev) < 0)
                goto failed_free;
 
+       ip6gre_tnl_link_config(nt, 1);
+
        /* Can use a lockless transmit, unless we generate output sequences */
        if (!(nt->parms.o_flags & TUNNEL_SEQ))
                dev->features |= NETIF_F_LLTX;
@@ -1303,7 +1304,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
 
 static int ip6gre_tap_init(struct net_device *dev)
 {
-       struct ip6_tnl *tunnel;
        int ret;
 
        ret = ip6gre_tunnel_init_common(dev);
@@ -1312,10 +1312,6 @@ static int ip6gre_tap_init(struct net_device *dev)
 
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 
-       tunnel = netdev_priv(dev);
-
-       ip6gre_tnl_link_config(tunnel, 1);
-
        return 0;
 }
 
@@ -1408,12 +1404,16 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
 
        nt->dev = dev;
        nt->net = dev_net(dev);
-       ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
 
        err = register_netdevice(dev);
        if (err)
                goto out;
 
+       ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
+
+       if (tb[IFLA_MTU])
+               ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
+
        dev_hold(dev);
        ip6gre_tunnel_link(ign, nt);
 
index 47ef2d8..84a4e4c 100644 (file)
@@ -2391,6 +2391,7 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
        while (skb->len >= nlmsg_total_size(0)) {
                int msglen;
 
+               memset(&extack, 0, sizeof(extack));
                nlh = nlmsg_hdr(skb);
                err = 0;
 
@@ -2405,7 +2406,6 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
                if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
                        goto ack;
 
-               memset(&extack, 0, sizeof(extack));
                err = cb(skb, nlh, &extack);
                if (err == -EINTR)
                        goto skip;
index 8d78e7f..a62586e 100644 (file)
@@ -183,10 +183,17 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
        return 0;
 }
 
+static u32 cls_bpf_flags(u32 flags)
+{
+       return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
+}
+
 static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                           struct cls_bpf_prog *oldprog)
 {
-       if (prog && oldprog && prog->gen_flags != oldprog->gen_flags)
+       if (prog && oldprog &&
+           cls_bpf_flags(prog->gen_flags) !=
+           cls_bpf_flags(oldprog->gen_flags))
                return -EINVAL;
 
        if (prog && tc_skip_hw(prog->gen_flags))
index e07ee3a..736719c 100644 (file)
@@ -367,8 +367,10 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
 
        crypto_info = &ctx->crypto_send;
        /* Currently we don't support set crypto info more than one time */
-       if (TLS_CRYPTO_INFO_READY(crypto_info))
+       if (TLS_CRYPTO_INFO_READY(crypto_info)) {
+               rc = -EBUSY;
                goto out;
+       }
 
        rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
        if (rc) {
@@ -386,7 +388,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
        case TLS_CIPHER_AES_GCM_128: {
                if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
                        rc = -EINVAL;
-                       goto out;
+                       goto err_crypto_info;
                }
                rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
                                    optlen - sizeof(*crypto_info));
@@ -398,7 +400,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
        }
        default:
                rc = -EINVAL;
-               goto out;
+               goto err_crypto_info;
        }
 
        /* currently SW is default, we will have ethtool in future */
@@ -454,6 +456,15 @@ static int tls_init(struct sock *sk)
        struct tls_context *ctx;
        int rc = 0;
 
+       /* The TLS ulp is currently supported only for TCP sockets
+        * in ESTABLISHED state.
+        * Supporting sockets in LISTEN state will require us
+        * to modify the accept implementation to clone rather then
+        * share the ulp context.
+        */
+       if (sk->sk_state != TCP_ESTABLISHED)
+               return -ENOTSUPP;
+
        /* allocate tls context */
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx) {
index 9773571..61f394d 100644 (file)
@@ -681,18 +681,17 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
        }
        default:
                rc = -EINVAL;
-               goto out;
+               goto free_priv;
        }
 
        ctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
        ctx->tag_size = tag_size;
        ctx->overhead_size = ctx->prepend_size + ctx->tag_size;
        ctx->iv_size = iv_size;
-       ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
-                         GFP_KERNEL);
+       ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE, GFP_KERNEL);
        if (!ctx->iv) {
                rc = -ENOMEM;
-               goto out;
+               goto free_priv;
        }
        memcpy(ctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
        memcpy(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
@@ -740,7 +739,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
 
        rc = crypto_aead_setauthsize(sw_ctx->aead_send, ctx->tag_size);
        if (!rc)
-               goto out;
+               return 0;
 
 free_aead:
        crypto_free_aead(sw_ctx->aead_send);
@@ -751,6 +750,9 @@ free_rec_seq:
 free_iv:
        kfree(ctx->iv);
        ctx->iv = NULL;
+free_priv:
+       kfree(ctx->priv_ctx);
+       ctx->priv_ctx = NULL;
 out:
        return rc;
 }
index ed87a97..542a4fc 100644 (file)
@@ -9809,7 +9809,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
         */
        if (!wdev->cqm_config->last_rssi_event_value && wdev->current_bss &&
            rdev->ops->get_station) {
-               struct station_info sinfo;
+               struct station_info sinfo = {};
                u8 *mac_addr;
 
                mac_addr = wdev->current_bss->pub.bssid;
index 7ca04a7..05186a4 100644 (file)
@@ -1254,8 +1254,7 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
-       /* we are under RTNL - globally locked - so can use a static struct */
-       static struct station_info sinfo;
+       struct station_info sinfo = {};
        u8 addr[ETH_ALEN];
        int err;
 
index 438120d..5ea0710 100755 (executable)
@@ -59,6 +59,14 @@ disas() {
                ${CROSS_COMPILE}strip $1.o
        fi
 
+       if [ "$ARCH" = "arm64" ]; then
+               if [ $width -eq 4 ]; then
+                       type=inst
+               fi
+
+               ${CROSS_COMPILE}strip $1.o
+       fi
+
        ${CROSS_COMPILE}objdump $OBJDUMPFLAGS -S $1.o | \
                grep -v "/tmp\|Disassembly\|\.text\|^$" > $1.dis 2>&1
 }
index 1bf949c..f6ab3cc 100644 (file)
@@ -96,6 +96,8 @@ def get_thread_info(task):
         thread_info_addr = task.address + ia64_task_size
         thread_info = thread_info_addr.cast(thread_info_ptr_type)
     else:
+        if task.type.fields()[0].type == thread_info_type.get_type():
+            return task['thread_info']
         thread_info = task['stack'].cast(thread_info_ptr_type)
     return thread_info.dereference()
 
index 6bafa54..5ed4175 100644 (file)
@@ -2593,6 +2593,29 @@ static struct bpf_test tests[] = {
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
+               "context stores via ST",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "BPF_ST stores into R1 context is not allowed",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "context stores via XADD",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
+                                    BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "BPF_XADD stores into R1 context is not allowed",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
                "direct packet access: test1",
                .insns = {
                        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -4312,7 +4335,8 @@ static struct bpf_test tests[] = {
                .fixup_map1 = { 2 },
                .errstr_unpriv = "R2 leaks addr into mem",
                .result_unpriv = REJECT,
-               .result = ACCEPT,
+               .result = REJECT,
+               .errstr = "BPF_XADD stores into R1 context is not allowed",
        },
        {
                "leak pointer into ctx 2",
@@ -4326,7 +4350,8 @@ static struct bpf_test tests[] = {
                },
                .errstr_unpriv = "R10 leaks addr into mem",
                .result_unpriv = REJECT,
-               .result = ACCEPT,
+               .result = REJECT,
+               .errstr = "BPF_XADD stores into R1 context is not allowed",
        },
        {
                "leak pointer into ctx 3",
@@ -6707,7 +6732,7 @@ static struct bpf_test tests[] = {
                        BPF_JMP_IMM(BPF_JA, 0, 0, -7),
                },
                .fixup_map1 = { 4 },
-               .errstr = "unbounded min value",
+               .errstr = "R0 invalid mem access 'inv'",
                .result = REJECT,
        },
        {
@@ -8609,6 +8634,127 @@ static struct bpf_test tests[] = {
                .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
+               "check deducing bounds from const, 1",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "R0 tried to subtract pointer from scalar",
+       },
+       {
+               "check deducing bounds from const, 2",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+       },
+       {
+               "check deducing bounds from const, 3",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "R0 tried to subtract pointer from scalar",
+       },
+       {
+               "check deducing bounds from const, 4",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+       },
+       {
+               "check deducing bounds from const, 5",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "R0 tried to subtract pointer from scalar",
+       },
+       {
+               "check deducing bounds from const, 6",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "R0 tried to subtract pointer from scalar",
+       },
+       {
+               "check deducing bounds from const, 7",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, ~0),
+                       BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "dereference of modified ctx ptr",
+       },
+       {
+               "check deducing bounds from const, 8",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, ~0),
+                       BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "dereference of modified ctx ptr",
+       },
+       {
+               "check deducing bounds from const, 9",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "R0 tried to subtract pointer from scalar",
+       },
+       {
+               "check deducing bounds from const, 10",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
+                       /* Marks reg as unknown. */
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
+       },
+       {
                "bpf_exit with invalid return code. test1",
                .insns = {
                        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
index b4b69c2..9dea963 100644 (file)
@@ -1310,7 +1310,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                return -EFAULT;
        }
 
-       if (is_vm_hugetlb_page(vma) && !logging_active) {
+       if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) {
                hugetlb = true;
                gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
        } else {
index 6231012..743ca5c 100644 (file)
@@ -285,9 +285,11 @@ int vgic_init(struct kvm *kvm)
        if (ret)
                goto out;
 
-       ret = vgic_v4_init(kvm);
-       if (ret)
-               goto out;
+       if (vgic_has_its(kvm)) {
+               ret = vgic_v4_init(kvm);
+               if (ret)
+                       goto out;
+       }
 
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_vgic_vcpu_enable(vcpu);
index 4a37292..bc42651 100644 (file)
@@ -118,7 +118,7 @@ int vgic_v4_init(struct kvm *kvm)
        struct kvm_vcpu *vcpu;
        int i, nr_vcpus, ret;
 
-       if (!vgic_supports_direct_msis(kvm))
+       if (!kvm_vgic_global_state.has_gicv4)
                return 0; /* Nothing to see here... move along. */
 
        if (dist->its_vm.vpes)