Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net...
author Jakub Kicinski <kuba@kernel.org>
Sat, 16 Oct 2021 00:20:15 +0000 (17:20 -0700)
committer Jakub Kicinski <kuba@kernel.org>
Sat, 16 Oct 2021 00:20:16 +0000 (17:20 -0700)
Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2021-10-14

Brett ensures RDMA nodes are removed during release and rebuild. He also
corrects fw.mgmt.api to include the patch number for proper
identification.

Dave prevents ida_free() from being called when an IDA has not been
allocated (see the sketch after the sign-off below).

Michal corrects the order of parameters being provided and the number of
entries skipped for UDP tunnels.
====================

Link: https://lore.kernel.org/r/20211014181953.3538330-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
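
As a side note to the ida_free() fix mentioned in the pull message, the
following is a minimal, hypothetical C sketch of that guard pattern:
freeing an ID only if one was actually allocated. The example_priv
structure, its field names, and the example_setup()/example_release()
helpers are illustrative assumptions, not the actual ice driver code.

    #include <linux/idr.h>
    #include <linux/gfp.h>

    /* Hypothetical driver-private state; the real driver layout differs. */
    struct example_priv {
            struct ida ida;         /* ID allocator */
            bool id_allocated;      /* set only after a successful ida_alloc() */
            int id;
    };

    static int example_setup(struct example_priv *p)
    {
            p->id = ida_alloc(&p->ida, GFP_KERNEL);
            if (p->id < 0)
                    return p->id;
            p->id_allocated = true;
            return 0;
    }

    /* Guard the release path: calling ida_free() for an ID that was never
     * allocated triggers a warning, so only free what was really handed out. */
    static void example_release(struct example_priv *p)
    {
            if (p->id_allocated) {
                    ida_free(&p->ida, p->id);
                    p->id_allocated = false;
            }
    }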
229 files changed:
Documentation/admin-guide/cgroup-v2.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
Documentation/gpu/amdgpu.rst
Documentation/gpu/drm-internals.rst
MAINTAINERS
Makefile
arch/arm/Kconfig
arch/arm64/Kconfig
arch/arm64/mm/hugetlbpage.c
arch/ia64/Kconfig
arch/mips/Kconfig
arch/parisc/Kconfig
arch/powerpc/include/asm/book3s/32/kup.h
arch/powerpc/include/asm/code-patching.h
arch/powerpc/include/asm/interrupt.h
arch/powerpc/include/asm/security_features.h
arch/powerpc/kernel/dma-iommu.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/security.c
arch/powerpc/kernel/traps.c
arch/powerpc/lib/code-patching.c
arch/powerpc/net/bpf_jit.h
arch/powerpc/net/bpf_jit64.h
arch/powerpc/net/bpf_jit_comp.c
arch/powerpc/net/bpf_jit_comp32.c
arch/powerpc/net/bpf_jit_comp64.c
arch/powerpc/platforms/pseries/eeh_pseries.c
arch/powerpc/platforms/pseries/msi.c
arch/riscv/Kconfig
arch/riscv/include/asm/syscall.h
arch/riscv/include/asm/vdso.h
arch/riscv/include/uapi/asm/unistd.h
arch/riscv/kernel/syscall_table.c
arch/riscv/kernel/vdso.c
arch/riscv/kernel/vdso/vdso.lds.S
arch/riscv/mm/cacheflush.c
arch/s390/include/asm/pci.h
arch/s390/net/bpf_jit_comp.c
arch/s390/pci/pci.c
arch/s390/pci/pci_event.c
arch/x86/Kconfig
arch/x86/include/asm/entry-common.h
arch/x86/include/asm/xen/pci.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/resctrl/core.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/fpu/signal.c
arch/x86/kernel/hpet.c
arch/x86/kernel/sev-shared.c
arch/x86/pci/xen.c
arch/x86/platform/olpc/olpc.c
arch/x86/platform/pvh/enlighten.c
arch/x86/xen/Kconfig
arch/x86/xen/Makefile
arch/x86/xen/enlighten.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/enlighten_pvh.c
arch/x86/xen/mmu_pv.c
arch/x86/xen/xen-ops.h
arch/xtensa/include/asm/kmem_layout.h
arch/xtensa/kernel/irq.c
arch/xtensa/kernel/setup.c
arch/xtensa/mm/mmu.c
arch/xtensa/platforms/xtfpga/setup.c
block/bdev.c
block/blk-mq-debugfs.c
block/genhd.c
drivers/Kconfig
drivers/acpi/arm64/gtdt.c
drivers/base/test/Makefile
drivers/firmware/Kconfig
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/display/Kconfig
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
drivers/gpu/drm/amd/display/include/dal_asic_id.h
drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h
drivers/gpu/drm/i915/display/icl_dsi.c
drivers/gpu/drm/i915/display/intel_audio.c
drivers/gpu/drm/i915/display/intel_bios.c
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_vbt_defs.h
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/kmb/kmb_drv.c
drivers/gpu/drm/kmb/kmb_drv.h
drivers/gpu/drm/kmb/kmb_plane.c
drivers/gpu/drm/kmb/kmb_plane.h
drivers/gpu/drm/kmb/kmb_regs.h
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/nouveau/dispnv50/crc.c
drivers/gpu/drm/nouveau/dispnv50/head.c
drivers/gpu/drm/nouveau/include/nvif/class.h
drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_chan.c
drivers/gpu/drm/nouveau/nouveau_debugfs.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nv84_fence.c
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
drivers/gpu/drm/panel/panel-abt-y030xx067a.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/i2c/busses/i2c-mlxcpld.c
drivers/i2c/busses/i2c-mt65xx.c
drivers/i2c/i2c-core-acpi.c
drivers/iio/test/Makefile
drivers/iommu/Kconfig
drivers/iommu/arm/arm-smmu/Makefile
drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
drivers/media/platform/Kconfig
drivers/mmc/host/Kconfig
drivers/mmc/host/meson-gx-mmc.c
drivers/mmc/host/sdhci-of-at91.c
drivers/net/ipa/Kconfig
drivers/net/usb/Kconfig
drivers/net/wireless/ath/ath10k/Kconfig
drivers/of/base.c
drivers/pci/hotplug/s390_pci_hpc.c
drivers/pci/pci-acpi.c
drivers/pinctrl/qcom/Kconfig
drivers/platform/mellanox/mlxreg-io.c
drivers/platform/x86/amd-pmc.c
drivers/platform/x86/dell/Kconfig
drivers/platform/x86/gigabyte-wmi.c
drivers/platform/x86/intel/int1092/intel_sar.c
drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c
drivers/platform/x86/intel_scu_ipc.c
drivers/ptp/ptp_kvm_x86.c
drivers/scsi/arm/acornscsi.c
drivers/scsi/elx/efct/efct_scsi.c
drivers/scsi/libiscsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/thunderbolt/Makefile
drivers/tty/hvc/hvc_xen.c
drivers/usb/chipidea/ci_hdrc_imx.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-wdm.c
drivers/usb/common/Kconfig
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_uac2.c
drivers/usb/host/xhci-tegra.c
drivers/usb/typec/tcpm/tcpci.c
drivers/usb/typec/tcpm/tcpm.c
drivers/usb/typec/tipd/core.c
drivers/video/fbdev/Kconfig
drivers/video/fbdev/gbefb.c
drivers/xen/Kconfig
drivers/xen/balloon.c
drivers/xen/privcmd.c
fs/btrfs/ctree.h
fs/btrfs/dir-item.c
fs/btrfs/extent-tree.c
fs/btrfs/file.c
fs/btrfs/tree-log.c
fs/ksmbd/connection.c
fs/ksmbd/glob.h
fs/ksmbd/smb2misc.c
fs/ksmbd/smb2ops.c
fs/ksmbd/smb2pdu.c
fs/ksmbd/smb2pdu.h
fs/ksmbd/smb_common.c
fs/ksmbd/smb_common.h
include/asm-generic/io.h
include/kunit/test.h
include/linux/arm-smccc.h
include/linux/qcom_scm.h
include/linux/workqueue.h
include/net/sctp/sm.h
include/net/tcp.h
include/sound/hda_codec.h
include/xen/xen-ops.h
kernel/cgroup/cpuset.c
kernel/module.c
kernel/workqueue.c
lib/Makefile
lib/kunit/executor_test.c
net/ipv4/tcp_ipv4.c
net/ipv6/ip6_output.c
net/ipv6/tcp_ipv6.c
scripts/Makefile.gcc-plugins
scripts/checksyscalls.sh
sound/core/pcm_compat.c
sound/core/seq_device.c
sound/hda/hdac_controller.c
sound/pci/hda/hda_bind.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_controller.c
sound/pci/hda/hda_controller.h
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_intel.h
sound/pci/hda/patch_realtek.c
sound/usb/mixer_scarlett_gen2.c
sound/usb/quirks-table.h
sound/usb/quirks.c
tools/objtool/arch/x86/decode.c
tools/objtool/check.c
tools/objtool/elf.c
tools/objtool/include/objtool/elf.h
tools/objtool/orc_gen.c
tools/objtool/special.c
tools/testing/kunit/kunit.py
tools/testing/kunit/kunit_tool_test.py
tools/testing/selftests/net/fcnal-test.sh
tools/testing/selftests/net/forwarding/Makefile
tools/testing/selftests/net/forwarding/forwarding.config.sample
tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh [new file with mode: 0755]
tools/testing/selftests/net/forwarding/lib.sh
tools/testing/selftests/net/nettest.c

index babbe04..4d8c27e 100644 (file)
@@ -1226,7 +1226,7 @@ PAGE_SIZE multiple when read back.
 
        Note that all fields in this file are hierarchical and the
        file modified event can be generated due to an event down the
-       hierarchy. For for the local events at the cgroup level see
+       hierarchy. For the local events at the cgroup level see
        memory.events.local.
 
          low
@@ -2170,19 +2170,19 @@ existing device files.
 
 Cgroup v2 device controller has no interface files and is implemented
 on top of cgroup BPF. To control access to device files, a user may
-create bpf programs of the BPF_CGROUP_DEVICE type and attach them
-to cgroups. On an attempt to access a device file, corresponding
-BPF programs will be executed, and depending on the return value
-the attempt will succeed or fail with -EPERM.
-
-A BPF_CGROUP_DEVICE program takes a pointer to the bpf_cgroup_dev_ctx
-structure, which describes the device access attempt: access type
-(mknod/read/write) and device (type, major and minor numbers).
-If the program returns 0, the attempt fails with -EPERM, otherwise
-it succeeds.
-
-An example of BPF_CGROUP_DEVICE program may be found in the kernel
-source tree in the tools/testing/selftests/bpf/progs/dev_cgroup.c file.
+create bpf programs of type BPF_PROG_TYPE_CGROUP_DEVICE and attach
+them to cgroups with BPF_CGROUP_DEVICE flag. On an attempt to access a
+device file, corresponding BPF programs will be executed, and depending
+on the return value the attempt will succeed or fail with -EPERM.
+
+A BPF_PROG_TYPE_CGROUP_DEVICE program takes a pointer to the
+bpf_cgroup_dev_ctx structure, which describes the device access attempt:
+access type (mknod/read/write) and device (type, major and minor numbers).
+If the program returns 0, the attempt fails with -EPERM, otherwise it
+succeeds.
+
+An example of BPF_PROG_TYPE_CGROUP_DEVICE program may be found in
+tools/testing/selftests/bpf/progs/dev_cgroup.c in the kernel source tree.
 
 
 RDMA
index 91ba391..43dc35f 100644 (file)
                        The VGA and EFI output is eventually overwritten by
                        the real console.
 
-                       The xen output can only be used by Xen PV guests.
+                       The xen option can only be used in Xen domains.
 
                        The sclp output can only be used on s390.
 
index 2ed010f..20ce88a 100644 (file)
@@ -22,7 +22,7 @@ properties:
     items:
       - enum:
           # ili9341 240*320 Color on stm32f429-disco board
-        - st,sf-tc240t-9370-t
+          - st,sf-tc240t-9370-t
       - const: ilitek,ili9341
 
   reg: true
index 364680c..8ba72e8 100644 (file)
@@ -300,8 +300,8 @@ pcie_replay_count
 .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
    :doc: pcie_replay_count
 
-+GPU SmartShift Information
-============================
+GPU SmartShift Information
+==========================
 
 GPU SmartShift information via sysfs
 
index 06af044..607f78f 100644 (file)
@@ -111,15 +111,6 @@ Component Helper Usage
 .. kernel-doc:: drivers/gpu/drm/drm_drv.c
    :doc: component helper usage recommendations
 
-IRQ Helper Library
-~~~~~~~~~~~~~~~~~~
-
-.. kernel-doc:: drivers/gpu/drm/drm_irq.c
-   :doc: irq helpers
-
-.. kernel-doc:: drivers/gpu/drm/drm_irq.c
-   :export:
-
 Memory Manager Initialization
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
index c4f2f33..703aa31 100644 (file)
@@ -9307,7 +9307,7 @@ S:        Maintained
 F:     drivers/platform/x86/intel/atomisp2/led.c
 
 INTEL BIOS SAR INT1092 DRIVER
-M:     Shravan S <s.shravan@intel.com>
+M:     Shravan Sudhakar <s.shravan@intel.com>
 M:     Intel Corporation <linuxwwan@intel.com>
 L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
@@ -9629,7 +9629,7 @@ F:        include/uapi/linux/isst_if.h
 F:     tools/power/x86/intel-speed-select/
 
 INTEL STRATIX10 FIRMWARE DRIVERS
-M:     Richard Gong <richard.gong@linux.intel.com>
+M:     Dinh Nguyen <dinguyen@kernel.org>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/ABI/testing/sysfs-devices-platform-stratix10-rsu
index 7b74223..4d0c0ed 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Opossums on Parade
 
 # *DOCUMENTATION*
index fc19642..59baf6c 100644 (file)
@@ -1989,8 +1989,6 @@ config ARCH_HIBERNATION_POSSIBLE
 
 endmenu
 
-source "drivers/firmware/Kconfig"
-
 if CRYPTO
 source "arch/arm/crypto/Kconfig"
 endif
index 5c7ae4c..fee914c 100644 (file)
@@ -1931,8 +1931,6 @@ source "drivers/cpufreq/Kconfig"
 
 endmenu
 
-source "drivers/firmware/Kconfig"
-
 source "drivers/acpi/Kconfig"
 
 source "arch/arm64/kvm/Kconfig"
index 23505fc..a8158c9 100644 (file)
@@ -43,7 +43,7 @@ void __init arm64_hugetlb_cma_reserve(void)
 #ifdef CONFIG_ARM64_4K_PAGES
        order = PUD_SHIFT - PAGE_SHIFT;
 #else
-       order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
+       order = CONT_PMD_SHIFT - PAGE_SHIFT;
 #endif
        /*
         * HugeTLB CMA reservation is required for gigantic
index 045792c..1e33666 100644 (file)
@@ -388,8 +388,6 @@ config CRASH_DUMP
          help
            Generate crash dump after being started by kexec.
 
-source "drivers/firmware/Kconfig"
-
 endmenu
 
 menu "Power management and ACPI options"
index 771ca53..6b8f591 100644 (file)
@@ -3316,8 +3316,6 @@ source "drivers/cpuidle/Kconfig"
 
 endmenu
 
-source "drivers/firmware/Kconfig"
-
 source "arch/mips/kvm/Kconfig"
 
 source "arch/mips/vdso/Kconfig"
index 4742b6f..27a8b49 100644 (file)
@@ -384,6 +384,4 @@ config KEXEC_FILE
 
 endmenu
 
-source "drivers/firmware/Kconfig"
-
 source "drivers/parisc/Kconfig"
index d4b145b..9f38040 100644 (file)
@@ -136,6 +136,14 @@ static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
        if (kuap_is_disabled())
                return;
 
+       if (unlikely(kuap != KUAP_NONE)) {
+               current->thread.kuap = KUAP_NONE;
+               kuap_lock(kuap, false);
+       }
+
+       if (likely(regs->kuap == KUAP_NONE))
+               return;
+
        current->thread.kuap = regs->kuap;
 
        kuap_unlock(regs->kuap, false);
index a95f637..4ba8345 100644 (file)
@@ -23,6 +23,7 @@
 #define BRANCH_ABSOLUTE        0x2
 
 bool is_offset_in_branch_range(long offset);
+bool is_offset_in_cond_branch_range(long offset);
 int create_branch(struct ppc_inst *instr, const u32 *addr,
                  unsigned long target, int flags);
 int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
index 6b800d3..a1d2382 100644 (file)
@@ -265,13 +265,16 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
        local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 
-       if (is_implicit_soft_masked(regs)) {
-               // Adjust regs->softe soft implicit soft-mask, so
-               // arch_irq_disabled_regs(regs) behaves as expected.
+       if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
+               /*
+                * Adjust regs->softe to be soft-masked if it had not been
+                * reconcied (e.g., interrupt entry with MSR[EE]=0 but softe
+                * not yet set disabled), or if it was in an implicit soft
+                * masked state. This makes arch_irq_disabled_regs(regs)
+                * behave as expected.
+                */
                regs->softe = IRQS_ALL_DISABLED;
        }
-       if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
-               BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
 
        /* Don't do any per-CPU operations until interrupt state is fixed */
 
@@ -525,10 +528,9 @@ static __always_inline long ____##func(struct pt_regs *regs)
 /* kernel/traps.c */
 DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
 #ifdef CONFIG_PPC_BOOK3S_64
-DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
-#else
-DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
+DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
 #endif
+DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
 DECLARE_INTERRUPT_HANDLER(SMIException);
 DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
 DECLARE_INTERRUPT_HANDLER(unknown_exception);
index 792eefa..27574f2 100644 (file)
@@ -39,6 +39,11 @@ static inline bool security_ftr_enabled(u64 feature)
        return !!(powerpc_security_features & feature);
 }
 
+#ifdef CONFIG_PPC_BOOK3S_64
+enum stf_barrier_type stf_barrier_type_get(void);
+#else
+static inline enum stf_barrier_type stf_barrier_type_get(void) { return STF_BARRIER_NONE; }
+#endif
 
 // Features indicating support for Spectre/Meltdown mitigations
 
index 111249f..038ce8d 100644 (file)
@@ -184,6 +184,15 @@ u64 dma_iommu_get_required_mask(struct device *dev)
        struct iommu_table *tbl = get_iommu_table_base(dev);
        u64 mask;
 
+       if (dev_is_pci(dev)) {
+               u64 bypass_mask = dma_direct_get_required_mask(dev);
+
+               if (dma_iommu_dma_supported(dev, bypass_mask)) {
+                       dev_info(dev, "%s: returning bypass mask 0x%llx\n", __func__, bypass_mask);
+                       return bypass_mask;
+               }
+       }
+
        if (!tbl)
                return 0;
 
index 37859e6..eaf1f72 100644 (file)
@@ -1243,7 +1243,7 @@ EXC_COMMON_BEGIN(machine_check_common)
        li      r10,MSR_RI
        mtmsrd  r10,1
        addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      machine_check_exception
+       bl      machine_check_exception_async
        b       interrupt_return_srr
 
 
@@ -1303,7 +1303,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
        subi    r12,r12,1
        sth     r12,PACA_IN_MCE(r13)
 
-       /* Invoke machine_check_exception to print MCE event and panic. */
+       /*
+        * Invoke machine_check_exception to print MCE event and panic.
+        * This is the NMI version of the handler because we are called from
+        * the early handler which is a true NMI.
+        */
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      machine_check_exception
 
@@ -1665,27 +1669,30 @@ EXC_COMMON_BEGIN(program_check_common)
         */
 
        andi.   r10,r12,MSR_PR
-       bne     2f                      /* If userspace, go normal path */
+       bne     .Lnormal_stack          /* If userspace, go normal path */
 
        andis.  r10,r12,(SRR1_PROGTM)@h
-       bne     1f                      /* If TM, emergency             */
+       bne     .Lemergency_stack       /* If TM, emergency             */
 
        cmpdi   r1,-INT_FRAME_SIZE      /* check if r1 is in userspace  */
-       blt     2f                      /* normal path if not           */
+       blt     .Lnormal_stack          /* normal path if not           */
 
        /* Use the emergency stack                                      */
-1:     andi.   r10,r12,MSR_PR          /* Set CR0 correctly for label  */
+.Lemergency_stack:
+       andi.   r10,r12,MSR_PR          /* Set CR0 correctly for label  */
                                        /* 3 in EXCEPTION_PROLOG_COMMON */
        mr      r10,r1                  /* Save r1                      */
        ld      r1,PACAEMERGSP(r13)     /* Use emergency stack          */
        subi    r1,r1,INT_FRAME_SIZE    /* alloc stack frame            */
        __ISTACK(program_check)=0
        __GEN_COMMON_BODY program_check
-       b 3f
-2:
+       b .Ldo_program_check
+
+.Lnormal_stack:
        __ISTACK(program_check)=1
        __GEN_COMMON_BODY program_check
-3:
+
+.Ldo_program_check:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      program_check_exception
        REST_NVGPRS(r1) /* instruction emulation may change GPRs */
index 551b653..c4f1d6b 100644 (file)
@@ -229,6 +229,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
                return;
        }
 
+       if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+               WARN_ON_ONCE(in_nmi() || in_hardirq());
+
        /*
         * After the stb, interrupts are unmasked and there are no interrupts
         * pending replay. The restart sequence makes this atomic with
@@ -321,6 +324,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
        if (mask)
                return;
 
+       if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+               WARN_ON_ONCE(in_nmi() || in_hardirq());
+
        /*
         * From this point onward, we can take interrupts, preempt,
         * etc... unless we got hard-disabled. We check if an event
index 1a99849..15fb5ea 100644 (file)
@@ -263,6 +263,11 @@ static int __init handle_no_stf_barrier(char *p)
 
 early_param("no_stf_barrier", handle_no_stf_barrier);
 
+enum stf_barrier_type stf_barrier_type_get(void)
+{
+       return stf_enabled_flush_types;
+}
+
 /* This is the generic flag used by other architectures */
 static int __init handle_ssbd(char *p)
 {
index aac8c04..1174170 100644 (file)
@@ -340,10 +340,16 @@ static bool exception_common(int signr, struct pt_regs *regs, int code,
                return false;
        }
 
-       show_signal_msg(signr, regs, code, addr);
+       /*
+        * Must not enable interrupts even for user-mode exception, because
+        * this can be called from machine check, which may be a NMI or IRQ
+        * which don't like interrupts being enabled. Could check for
+        * in_hardirq || in_nmi perhaps, but there doesn't seem to be a good
+        * reason why _exception() should enable irqs for an exception handler,
+        * the handlers themselves do that directly.
+        */
 
-       if (arch_irqs_disabled())
-               interrupt_cond_local_irq_enable(regs);
+       show_signal_msg(signr, regs, code, addr);
 
        current->thread.trap_nr = code;
 
@@ -790,24 +796,22 @@ void die_mce(const char *str, struct pt_regs *regs, long err)
         * do_exit() checks for in_interrupt() and panics in that case, so
         * exit the irq/nmi before calling die.
         */
-       if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
-               irq_exit();
-       else
+       if (in_nmi())
                nmi_exit();
+       else
+               irq_exit();
        die(str, regs, err);
 }
 
 /*
- * BOOK3S_64 does not call this handler as a non-maskable interrupt
+ * BOOK3S_64 does not usually call this handler as a non-maskable interrupt
  * (it uses its own early real-mode handler to handle the MCE proper
  * and then raises irq_work to call this handler when interrupts are
- * enabled).
+ * enabled). The only time when this is not true is if the early handler
+ * is unrecoverable, then it does call this directly to try to get a
+ * message out.
  */
-#ifdef CONFIG_PPC_BOOK3S_64
-DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception)
-#else
-DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
-#endif
+static void __machine_check_exception(struct pt_regs *regs)
 {
        int recover = 0;
 
@@ -841,12 +845,19 @@ bail:
        /* Must die if the interrupt is not recoverable */
        if (regs_is_unrecoverable(regs))
                die_mce("Unrecoverable Machine check", regs, SIGBUS);
+}
 
 #ifdef CONFIG_PPC_BOOK3S_64
-       return;
-#else
-       return 0;
+DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async)
+{
+       __machine_check_exception(regs);
+}
 #endif
+DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
+{
+       __machine_check_exception(regs);
+
+       return 0;
 }
 
 DEFINE_INTERRUPT_HANDLER(SMIException) /* async? */
index f9a3019..c5ed988 100644 (file)
@@ -228,6 +228,11 @@ bool is_offset_in_branch_range(long offset)
        return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
 }
 
+bool is_offset_in_cond_branch_range(long offset)
+{
+       return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
+}
+
 /*
  * Helper to check if a given instruction is a conditional branch
  * Derived from the conditional checks in analyse_instr()
@@ -280,7 +285,7 @@ int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
                offset = offset - (unsigned long)addr;
 
        /* Check we can represent the target in the instruction format */
-       if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3)
+       if (!is_offset_in_cond_branch_range(offset))
                return 1;
 
        /* Mask out the flags and target, so they don't step on each other. */
index 99fad09..7e9b978 100644 (file)
 #define EMIT(instr)            PLANT_INSTR(image, ctx->idx, instr)
 
 /* Long jump; (unconditional 'branch') */
-#define PPC_JMP(dest)          EMIT(PPC_INST_BRANCH |                        \
-                                    (((dest) - (ctx->idx * 4)) & 0x03fffffc))
+#define PPC_JMP(dest)                                                        \
+       do {                                                                  \
+               long offset = (long)(dest) - (ctx->idx * 4);                  \
+               if (!is_offset_in_branch_range(offset)) {                     \
+                       pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx);                       \
+                       return -ERANGE;                                       \
+               }                                                             \
+               EMIT(PPC_INST_BRANCH | (offset & 0x03fffffc));                \
+       } while (0)
+
 /* blr; (unconditional 'branch' with link) to absolute address */
 #define PPC_BL_ABS(dest)       EMIT(PPC_INST_BL |                            \
                                     (((dest) - (unsigned long)(image + ctx->idx)) & 0x03fffffc))
 /* "cond" here covers BO:BI fields. */
-#define PPC_BCC_SHORT(cond, dest)      EMIT(PPC_INST_BRANCH_COND |           \
-                                            (((cond) & 0x3ff) << 16) |       \
-                                            (((dest) - (ctx->idx * 4)) &     \
-                                             0xfffc))
+#define PPC_BCC_SHORT(cond, dest)                                            \
+       do {                                                                  \
+               long offset = (long)(dest) - (ctx->idx * 4);                  \
+               if (!is_offset_in_cond_branch_range(offset)) {                \
+                       pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx);           \
+                       return -ERANGE;                                       \
+               }                                                             \
+               EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc));                                      \
+       } while (0)
+
 /* Sign-extended 32-bit immediate load */
 #define PPC_LI32(d, i)         do {                                          \
                if ((int)(uintptr_t)(i) >= -32768 &&                          \
 #define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
 #endif
 
-static inline bool is_nearbranch(int offset)
-{
-       return (offset < 32768) && (offset >= -32768);
-}
-
 /*
  * The fly in the ointment of code size changing from pass to pass is
  * avoided by padding the short branch case with a NOP.         If code size differs
@@ -91,7 +100,7 @@ static inline bool is_nearbranch(int offset)
  * state.
  */
 #define PPC_BCC(cond, dest)    do {                                          \
-               if (is_nearbranch((dest) - (ctx->idx * 4))) {                 \
+               if (is_offset_in_cond_branch_range((long)(dest) - (ctx->idx * 4))) {    \
                        PPC_BCC_SHORT(cond, dest);                            \
                        EMIT(PPC_RAW_NOP());                                  \
                } else {                                                      \
index 7b713ed..b63b35e 100644 (file)
  * with our redzone usage.
  *
  *             [       prev sp         ] <-------------
- *             [   nv gpr save area    ] 6*8           |
+ *             [   nv gpr save area    ] 5*8           |
  *             [    tail_call_cnt      ] 8             |
- *             [    local_tmp_var      ] 8             |
+ *             [    local_tmp_var      ] 16            |
  * fp (r31) -->        [   ebpf stack space    ] upto 512      |
  *             [     frame header      ] 32/112        |
  * sp (r1) --->        [    stack pointer      ] --------------
  */
 
 /* for gpr non volatile registers BPG_REG_6 to 10 */
-#define BPF_PPC_STACK_SAVE     (6*8)
+#define BPF_PPC_STACK_SAVE     (5*8)
 /* for bpf JIT code internal usage */
-#define BPF_PPC_STACK_LOCALS   16
+#define BPF_PPC_STACK_LOCALS   24
 /* stack frame excluding BPF stack, ensure this is quadword aligned */
 #define BPF_PPC_STACKFRAME     (STACK_FRAME_MIN_SIZE + \
                                 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
index 53aefee..fcbf7a9 100644 (file)
@@ -210,7 +210,11 @@ skip_init_ctx:
                /* Now build the prologue, body code & epilogue for real. */
                cgctx.idx = 0;
                bpf_jit_build_prologue(code_base, &cgctx);
-               bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
+               if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass)) {
+                       bpf_jit_binary_free(bpf_hdr);
+                       fp = org_fp;
+                       goto out_addrs;
+               }
                bpf_jit_build_epilogue(code_base, &cgctx);
 
                if (bpf_jit_enable > 1)
index beb12cb..0da31d4 100644 (file)
@@ -200,7 +200,7 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
        }
 }
 
-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
 {
        /*
         * By now, the eBPF program has already setup parameters in r3-r6
@@ -261,7 +261,9 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
        bpf_jit_emit_common_epilogue(image, ctx);
 
        EMIT(PPC_RAW_BCTR());
+
        /* out: */
+       return 0;
 }
 
 /* Assemble the body code between the prologue & epilogue */
@@ -355,7 +357,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
                                PPC_LI32(_R0, imm);
                                EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, _R0));
                        }
-                       if (imm >= 0)
+                       if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000))
                                EMIT(PPC_RAW_ADDZE(dst_reg_h, dst_reg_h));
                        else
                                EMIT(PPC_RAW_ADDME(dst_reg_h, dst_reg_h));
@@ -623,7 +625,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
                        EMIT(PPC_RAW_LI(dst_reg_h, 0));
                        break;
                case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
-                       EMIT(PPC_RAW_SRAW(dst_reg_h, dst_reg, src_reg));
+                       EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
                        break;
                case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
                        bpf_set_seen_register(ctx, tmp_reg);
@@ -1073,7 +1075,7 @@ cond_branch:
                                break;
                        case BPF_JMP32 | BPF_JSET | BPF_K:
                                /* andi does not sign-extend the immediate */
-                               if (imm >= -32768 && imm < 32768) {
+                               if (imm >= 0 && imm < 32768) {
                                        /* PPC_ANDI is _only/always_ dot-form */
                                        EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
                                } else {
@@ -1090,7 +1092,9 @@ cond_branch:
                 */
                case BPF_JMP | BPF_TAIL_CALL:
                        ctx->seen |= SEEN_TAILCALL;
-                       bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+                       ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+                       if (ret < 0)
+                               return ret;
                        break;
 
                default:
@@ -1103,7 +1107,7 @@ cond_branch:
                        return -EOPNOTSUPP;
                }
                if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
-                   !insn_is_zext(&insn[i + 1]))
+                   !insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64))
                        EMIT(PPC_RAW_LI(dst_reg_h, 0));
        }
 
index b87a63d..8b5157c 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/if_vlan.h>
 #include <asm/kprobes.h>
 #include <linux/bpf.h>
+#include <asm/security_features.h>
 
 #include "bpf_jit64.h"
 
@@ -35,9 +36,9 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
  *             [       prev sp         ] <-------------
  *             [         ...           ]               |
  * sp (r1) --->        [    stack pointer      ] --------------
- *             [   nv gpr save area    ] 6*8
+ *             [   nv gpr save area    ] 5*8
  *             [    tail_call_cnt      ] 8
- *             [    local_tmp_var      ] 8
+ *             [    local_tmp_var      ] 16
  *             [   unused red zone     ] 208 bytes protected
  */
 static int bpf_jit_stack_local(struct codegen_context *ctx)
@@ -45,12 +46,12 @@ static int bpf_jit_stack_local(struct codegen_context *ctx)
        if (bpf_has_stack_frame(ctx))
                return STACK_FRAME_MIN_SIZE + ctx->stack_size;
        else
-               return -(BPF_PPC_STACK_SAVE + 16);
+               return -(BPF_PPC_STACK_SAVE + 24);
 }
 
 static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
 {
-       return bpf_jit_stack_local(ctx) + 8;
+       return bpf_jit_stack_local(ctx) + 16;
 }
 
 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
@@ -206,7 +207,7 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
        EMIT(PPC_RAW_BCTRL());
 }
 
-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
 {
        /*
         * By now, the eBPF program has already setup parameters in r3, r4 and r5
@@ -267,13 +268,38 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
        bpf_jit_emit_common_epilogue(image, ctx);
 
        EMIT(PPC_RAW_BCTR());
+
        /* out: */
+       return 0;
 }
 
+/*
+ * We spill into the redzone always, even if the bpf program has its own stackframe.
+ * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
+ */
+void bpf_stf_barrier(void);
+
+asm (
+"              .global bpf_stf_barrier         ;"
+"      bpf_stf_barrier:                        ;"
+"              std     21,-64(1)               ;"
+"              std     22,-56(1)               ;"
+"              sync                            ;"
+"              ld      21,-64(1)               ;"
+"              ld      22,-56(1)               ;"
+"              ori     31,31,0                 ;"
+"              .rept 14                        ;"
+"              b       1f                      ;"
+"      1:                                      ;"
+"              .endr                           ;"
+"              blr                             ;"
+);
+
 /* Assemble the body code between the prologue & epilogue */
 int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
                       u32 *addrs, bool extra_pass)
 {
+       enum stf_barrier_type stf_barrier = stf_barrier_type_get();
        const struct bpf_insn *insn = fp->insnsi;
        int flen = fp->len;
        int i, ret;
@@ -328,18 +354,25 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
                        EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
                        goto bpf_alu32_trunc;
                case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
-               case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
                case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
+                       if (!imm) {
+                               goto bpf_alu32_trunc;
+                       } else if (imm >= -32768 && imm < 32768) {
+                               EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
+                       } else {
+                               PPC_LI32(b2p[TMP_REG_1], imm);
+                               EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
+                       }
+                       goto bpf_alu32_trunc;
+               case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
                case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
-                       if (BPF_OP(code) == BPF_SUB)
-                               imm = -imm;
-                       if (imm) {
-                               if (imm >= -32768 && imm < 32768)
-                                       EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
-                               else {
-                                       PPC_LI32(b2p[TMP_REG_1], imm);
-                                       EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
-                               }
+                       if (!imm) {
+                               goto bpf_alu32_trunc;
+                       } else if (imm > -32768 && imm <= 32768) {
+                               EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
+                       } else {
+                               PPC_LI32(b2p[TMP_REG_1], imm);
+                               EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
                        }
                        goto bpf_alu32_trunc;
                case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
@@ -389,8 +422,14 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
                case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
                        if (imm == 0)
                                return -EINVAL;
-                       else if (imm == 1)
-                               goto bpf_alu32_trunc;
+                       if (imm == 1) {
+                               if (BPF_OP(code) == BPF_DIV) {
+                                       goto bpf_alu32_trunc;
+                               } else {
+                                       EMIT(PPC_RAW_LI(dst_reg, 0));
+                                       break;
+                               }
+                       }
 
                        PPC_LI32(b2p[TMP_REG_1], imm);
                        switch (BPF_CLASS(code)) {
@@ -631,6 +670,29 @@ emit_clear:
                 * BPF_ST NOSPEC (speculation barrier)
                 */
                case BPF_ST | BPF_NOSPEC:
+                       if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
+                                       !security_ftr_enabled(SEC_FTR_STF_BARRIER))
+                               break;
+
+                       switch (stf_barrier) {
+                       case STF_BARRIER_EIEIO:
+                               EMIT(PPC_RAW_EIEIO() | 0x02000000);
+                               break;
+                       case STF_BARRIER_SYNC_ORI:
+                               EMIT(PPC_RAW_SYNC());
+                               EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R13, 0));
+                               EMIT(PPC_RAW_ORI(_R31, _R31, 0));
+                               break;
+                       case STF_BARRIER_FALLBACK:
+                               EMIT(PPC_RAW_MFLR(b2p[TMP_REG_1]));
+                               PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
+                               EMIT(PPC_RAW_MTCTR(12));
+                               EMIT(PPC_RAW_BCTRL());
+                               EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
+                               break;
+                       case STF_BARRIER_NONE:
+                               break;
+                       }
                        break;
 
                /*
@@ -993,7 +1055,9 @@ cond_branch:
                 */
                case BPF_JMP | BPF_TAIL_CALL:
                        ctx->seen |= SEEN_TAILCALL;
-                       bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+                       ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+                       if (ret < 0)
+                               return ret;
                        break;
 
                default:
index bc15200..09fafcf 100644 (file)
@@ -867,6 +867,10 @@ static int __init eeh_pseries_init(void)
        if (is_kdump_kernel() || reset_devices) {
                pr_info("Issue PHB reset ...\n");
                list_for_each_entry(phb, &hose_list, list_node) {
+                       // Skip if the slot is empty
+                       if (list_empty(&PCI_DN(phb->dn)->child_list))
+                               continue;
+
                        pdn = list_first_entry(&PCI_DN(phb->dn)->child_list, struct pci_dn, list);
                        config_addr = pseries_eeh_get_pe_config_addr(pdn);
 
index 1b305e4..8627362 100644 (file)
@@ -507,12 +507,27 @@ static void pseries_msi_unmask(struct irq_data *d)
        irq_chip_unmask_parent(d);
 }
 
+static void pseries_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+{
+       struct msi_desc *entry = irq_data_get_msi_desc(data);
+
+       /*
+        * Do not update the MSIx vector table. It's not strictly necessary
+        * because the table is initialized by the underlying hypervisor, PowerVM
+        * or QEMU/KVM. However, if the MSIx vector entry is cleared, any further
+        * activation will fail. This can happen in some drivers (eg. IPR) which
+        * deactivate an IRQ used for testing MSI support.
+        */
+       entry->msg = *msg;
+}
+
 static struct irq_chip pseries_pci_msi_irq_chip = {
        .name           = "pSeries-PCI-MSI",
        .irq_shutdown   = pseries_msi_shutdown,
        .irq_mask       = pseries_msi_mask,
        .irq_unmask     = pseries_msi_unmask,
        .irq_eoi        = irq_chip_eoi_parent,
+       .irq_write_msi_msg      = pseries_msi_write_msg,
 };
 
 static struct msi_domain_info pseries_msi_domain_info = {
index 301a542..6a6fa9e 100644 (file)
@@ -561,5 +561,3 @@ menu "Power management options"
 source "kernel/power/Kconfig"
 
 endmenu
-
-source "drivers/firmware/Kconfig"
index b933b15..34fbb3e 100644 (file)
@@ -82,4 +82,5 @@ static inline int syscall_get_arch(struct task_struct *task)
 #endif
 }
 
+asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
 #endif /* _ASM_RISCV_SYSCALL_H */
index 893e471..208e31b 100644 (file)
 #ifdef CONFIG_MMU
 
 #include <linux/types.h>
-#include <generated/vdso-offsets.h>
+/*
+ * All systems with an MMU have a VDSO, but systems without an MMU don't
+ * support shared libraries and therefor don't have one.
+ */
+#ifdef CONFIG_MMU
+
+#define __VVAR_PAGES    1
 
-#ifndef CONFIG_GENERIC_TIME_VSYSCALL
-struct vdso_data {
-};
-#endif
+#ifndef __ASSEMBLY__
+#include <generated/vdso-offsets.h>
 
 #define VDSO_SYMBOL(base, name)                                                        \
        (void __user *)((unsigned long)(base) + __vdso_##name##_offset)
 
 #endif /* CONFIG_MMU */
 
-asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_MMU */
 
 #endif /* _ASM_RISCV_VDSO_H */
index 4b989ae..8062996 100644 (file)
 #ifdef __LP64__
 #define __ARCH_WANT_NEW_STAT
 #define __ARCH_WANT_SET_GET_RLIMIT
-#define __ARCH_WANT_SYS_CLONE3
 #endif /* __LP64__ */
 
+#define __ARCH_WANT_SYS_CLONE3
+
 #include <asm-generic/unistd.h>
 
 /*
index a63c667..44b1420 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/linkage.h>
 #include <linux/syscalls.h>
 #include <asm-generic/syscalls.h>
-#include <asm/vdso.h>
 #include <asm/syscall.h>
 
 #undef __SYSCALL
index 25a3b88..b70956d 100644 (file)
 #include <linux/binfmts.h>
 #include <linux/err.h>
 #include <asm/page.h>
+#include <asm/vdso.h>
+
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL
 #include <vdso/datapage.h>
 #else
-#include <asm/vdso.h>
+struct vdso_data {
+};
 #endif
 
 extern char vdso_start[], vdso_end[];
 
+enum vvar_pages {
+       VVAR_DATA_PAGE_OFFSET,
+       VVAR_NR_PAGES,
+};
+
+#define VVAR_SIZE  (VVAR_NR_PAGES << PAGE_SHIFT)
+
 static unsigned int vdso_pages __ro_after_init;
 static struct page **vdso_pagelist __ro_after_init;
 
@@ -38,7 +48,7 @@ static int __init vdso_init(void)
 
        vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
        vdso_pagelist =
-               kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
+               kcalloc(vdso_pages + VVAR_NR_PAGES, sizeof(struct page *), GFP_KERNEL);
        if (unlikely(vdso_pagelist == NULL)) {
                pr_err("vdso: pagelist allocation failed\n");
                return -ENOMEM;
@@ -63,38 +73,41 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
        unsigned long vdso_base, vdso_len;
        int ret;
 
-       vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
+       BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
+
+       vdso_len = (vdso_pages + VVAR_NR_PAGES) << PAGE_SHIFT;
+
+       if (mmap_write_lock_killable(mm))
+               return -EINTR;
 
-       mmap_write_lock(mm);
        vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                ret = vdso_base;
                goto end;
        }
 
-       /*
-        * Put vDSO base into mm struct. We need to do this before calling
-        * install_special_mapping or the perf counter mmap tracking code
-        * will fail to recognise it as a vDSO (since arch_vma_name fails).
-        */
-       mm->context.vdso = (void *)vdso_base;
+       mm->context.vdso = NULL;
+       ret = install_special_mapping(mm, vdso_base, VVAR_SIZE,
+               (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+       if (unlikely(ret))
+               goto end;
 
        ret =
-          install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
+          install_special_mapping(mm, vdso_base + VVAR_SIZE,
+               vdso_pages << PAGE_SHIFT,
                (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
                vdso_pagelist);
 
-       if (unlikely(ret)) {
-               mm->context.vdso = NULL;
+       if (unlikely(ret))
                goto end;
-       }
 
-       vdso_base += (vdso_pages << PAGE_SHIFT);
-       ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
-               (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+       /*
+        * Put vDSO base into mm struct. We need to do this before calling
+        * install_special_mapping or the perf counter mmap tracking code
+        * will fail to recognise it as a vDSO (since arch_vma_name fails).
+        */
+       mm->context.vdso = (void *)vdso_base + VVAR_SIZE;
 
-       if (unlikely(ret))
-               mm->context.vdso = NULL;
 end:
        mmap_write_unlock(mm);
        return ret;
@@ -105,7 +118,7 @@ const char *arch_vma_name(struct vm_area_struct *vma)
        if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
                return "[vdso]";
        if (vma->vm_mm && (vma->vm_start ==
-                          (long)vma->vm_mm->context.vdso + PAGE_SIZE))
+                          (long)vma->vm_mm->context.vdso - VVAR_SIZE))
                return "[vdso_data]";
        return NULL;
 }
index e6f558b..e9111f7 100644 (file)
@@ -3,12 +3,13 @@
  * Copyright (C) 2012 Regents of the University of California
  */
 #include <asm/page.h>
+#include <asm/vdso.h>
 
 OUTPUT_ARCH(riscv)
 
 SECTIONS
 {
-       PROVIDE(_vdso_data = . + PAGE_SIZE);
+       PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
        . = SIZEOF_HEADERS;
 
        .hash           : { *(.hash) }                  :text
index 0941186..89f8106 100644 (file)
@@ -16,6 +16,8 @@ static void ipi_remote_fence_i(void *info)
 
 void flush_icache_all(void)
 {
+       local_flush_icache_all();
+
        if (IS_ENABLED(CONFIG_RISCV_SBI))
                sbi_remote_fence_i(NULL);
        else
index e4803ec..6b3c366 100644 (file)
@@ -207,6 +207,8 @@ int zpci_enable_device(struct zpci_dev *);
 int zpci_disable_device(struct zpci_dev *);
 int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh);
 int zpci_deconfigure_device(struct zpci_dev *zdev);
+void zpci_device_reserved(struct zpci_dev *zdev);
+bool zpci_is_device_configured(struct zpci_dev *zdev);
 
 int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
 int zpci_unregister_ioat(struct zpci_dev *, u8);
index 840d859..1a374d0 100644 (file)
@@ -1826,7 +1826,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
        jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
        if (jit.addrs == NULL) {
                fp = orig_fp;
-               goto out;
+               goto free_addrs;
        }
        /*
         * Three initial passes:
index e7e6788..b833155 100644 (file)
@@ -92,7 +92,7 @@ void zpci_remove_reserved_devices(void)
        spin_unlock(&zpci_list_lock);
 
        list_for_each_entry_safe(zdev, tmp, &remove, entry)
-               zpci_zdev_put(zdev);
+               zpci_device_reserved(zdev);
 }
 
 int pci_domain_nr(struct pci_bus *bus)
@@ -751,6 +751,14 @@ error:
        return ERR_PTR(rc);
 }
 
+bool zpci_is_device_configured(struct zpci_dev *zdev)
+{
+       enum zpci_state state = zdev->state;
+
+       return state != ZPCI_FN_STATE_RESERVED &&
+               state != ZPCI_FN_STATE_STANDBY;
+}
+
 /**
  * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
  * @zdev: The zpci_dev to be configured
@@ -822,6 +830,31 @@ int zpci_deconfigure_device(struct zpci_dev *zdev)
        return 0;
 }
 
+/**
+ * zpci_device_reserved() - Mark device as resverved
+ * @zdev: the zpci_dev that was reserved
+ *
+ * Handle the case that a given zPCI function was reserved by another system.
+ * After a call to this function the zpci_dev can not be found via
+ * get_zdev_by_fid() anymore but may still be accessible via existing
+ * references though it will not be functional anymore.
+ */
+void zpci_device_reserved(struct zpci_dev *zdev)
+{
+       if (zdev->has_hp_slot)
+               zpci_exit_slot(zdev);
+       /*
+        * Remove device from zpci_list as it is going away. This also
+        * makes sure we ignore subsequent zPCI events for this device.
+        */
+       spin_lock(&zpci_list_lock);
+       list_del(&zdev->entry);
+       spin_unlock(&zpci_list_lock);
+       zdev->state = ZPCI_FN_STATE_RESERVED;
+       zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
+       zpci_zdev_put(zdev);
+}
+
 void zpci_release_device(struct kref *kref)
 {
        struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
@@ -843,6 +876,12 @@ void zpci_release_device(struct kref *kref)
        case ZPCI_FN_STATE_STANDBY:
                if (zdev->has_hp_slot)
                        zpci_exit_slot(zdev);
+               spin_lock(&zpci_list_lock);
+               list_del(&zdev->entry);
+               spin_unlock(&zpci_list_lock);
+               zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
+               fallthrough;
+       case ZPCI_FN_STATE_RESERVED:
                if (zdev->has_resources)
                        zpci_cleanup_bus_resources(zdev);
                zpci_bus_device_unregister(zdev);
@@ -851,10 +890,6 @@ void zpci_release_device(struct kref *kref)
        default:
                break;
        }
-
-       spin_lock(&zpci_list_lock);
-       list_del(&zdev->entry);
-       spin_unlock(&zpci_list_lock);
        zpci_dbg(3, "rem fid:%x\n", zdev->fid);
        kfree(zdev);
 }
index c856f80..5b8d647 100644 (file)
@@ -140,7 +140,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
                        /* The 0x0304 event may immediately reserve the device */
                        if (!clp_get_state(zdev->fid, &state) &&
                            state == ZPCI_FN_STATE_RESERVED) {
-                               zpci_zdev_put(zdev);
+                               zpci_device_reserved(zdev);
                        }
                }
                break;
@@ -151,7 +151,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
        case 0x0308: /* Standby -> Reserved */
                if (!zdev)
                        break;
-               zpci_zdev_put(zdev);
+               zpci_device_reserved(zdev);
                break;
        default:
                break;
index ab83c22..bd70e8a 100644 (file)
@@ -1405,7 +1405,7 @@ config HIGHMEM4G
 
 config HIGHMEM64G
        bool "64GB"
-       depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
+       depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !MWINCHIP3D && !MK6
        select X86_PAE
        help
          Select this if you have a 32-bit processor and more than 4
@@ -2832,8 +2832,6 @@ config HAVE_ATOMIC_IOMAP
        def_bool y
        depends on X86_32
 
-source "drivers/firmware/Kconfig"
-
 source "arch/x86/kvm/Kconfig"
 
 source "arch/x86/Kconfig.assembler"
index 14ebd21..4318464 100644 (file)
@@ -25,7 +25,7 @@ static __always_inline void arch_check_user_regs(struct pt_regs *regs)
                 * For !SMAP hardware we patch out CLAC on entry.
                 */
                if (boot_cpu_has(X86_FEATURE_SMAP) ||
-                   (IS_ENABLED(CONFIG_64_BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
+                   (IS_ENABLED(CONFIG_64BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
                        mask |= X86_EFLAGS_AC;
 
                WARN_ON_ONCE(flags & mask);
index 3506d8c..4557f7c 100644 (file)
@@ -14,16 +14,19 @@ static inline int pci_xen_hvm_init(void)
        return -1;
 }
 #endif
-#if defined(CONFIG_XEN_DOM0)
+#ifdef CONFIG_XEN_PV_DOM0
 int __init pci_xen_initial_domain(void);
-int xen_find_device_domain_owner(struct pci_dev *dev);
-int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
-int xen_unregister_device_domain_owner(struct pci_dev *dev);
 #else
 static inline int __init pci_xen_initial_domain(void)
 {
        return -1;
 }
+#endif
+#ifdef CONFIG_XEN_DOM0
+int xen_find_device_domain_owner(struct pci_dev *dev);
+int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
+int xen_unregister_device_domain_owner(struct pci_dev *dev);
+#else
 static inline int xen_find_device_domain_owner(struct pci_dev *dev)
 {
        return -1;
index 0f88859..b3410f1 100644 (file)
@@ -326,6 +326,7 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_SMAP
                cr4_set_bits(X86_CR4_SMAP);
 #else
+               clear_cpu_cap(c, X86_FEATURE_SMAP);
                cr4_clear_bits(X86_CR4_SMAP);
 #endif
        }
index 4b8813b..bb1c3f5 100644 (file)
@@ -527,12 +527,14 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
        rdt_domain_reconfigure_cdp(r);
 
        if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
-               kfree(d);
+               kfree(hw_dom);
                return;
        }
 
        if (r->mon_capable && domain_setup_mon_state(r, d)) {
-               kfree(d);
+               kfree(hw_dom->ctrl_val);
+               kfree(hw_dom->mbps_val);
+               kfree(hw_dom);
                return;
        }
 
index 38837da..391a4e2 100644 (file)
@@ -714,12 +714,6 @@ static struct chipset early_qrk[] __initdata = {
         */
        { PCI_VENDOR_ID_INTEL, 0x0f00,
                PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
-       { PCI_VENDOR_ID_INTEL, 0x3e20,
-               PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
-       { PCI_VENDOR_ID_INTEL, 0x3ec4,
-               PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
-       { PCI_VENDOR_ID_INTEL, 0x8a12,
-               PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
        { PCI_VENDOR_ID_BROADCOM, 0x4331,
          PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
        {}
index 445c57c..fa17a27 100644 (file)
@@ -379,9 +379,14 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
                                     sizeof(fpu->state.fxsave)))
                        return -EFAULT;
 
-               /* Reject invalid MXCSR values. */
-               if (fpu->state.fxsave.mxcsr & ~mxcsr_feature_mask)
-                       return -EINVAL;
+               if (IS_ENABLED(CONFIG_X86_64)) {
+                       /* Reject invalid MXCSR values. */
+                       if (fpu->state.fxsave.mxcsr & ~mxcsr_feature_mask)
+                               return -EINVAL;
+               } else {
+                       /* Mask invalid bits out for historical reasons (broken hardware). */
+                       fpu->state.fxsave.mxcsr &= ~mxcsr_feature_mask;
+               }
 
                /* Enforce XFEATURE_MASK_FPSSE when XSAVE is enabled */
                if (use_xsave())
index 42fc41d..882213d 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/irq_remapping.h>
 #include <asm/hpet.h>
 #include <asm/time.h>
+#include <asm/mwait.h>
 
 #undef  pr_fmt
 #define pr_fmt(fmt) "hpet: " fmt
@@ -916,6 +917,83 @@ static bool __init hpet_counting(void)
        return false;
 }
 
+static bool __init mwait_pc10_supported(void)
+{
+       unsigned int eax, ebx, ecx, mwait_substates;
+
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return false;
+
+       if (!cpu_feature_enabled(X86_FEATURE_MWAIT))
+               return false;
+
+       if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+               return false;
+
+       cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+
+       return (ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) &&
+              (ecx & CPUID5_ECX_INTERRUPT_BREAK) &&
+              (mwait_substates & (0xF << 28));
+}
+
+/*
+ * Check whether the system supports PC10. If so force disable HPET as that
+ * stops counting in PC10. This check is overbroad as it does not take any
+ * of the following into account:
+ *
+ *     - ACPI tables
+ *     - Enablement of intel_idle
+ *     - Command line arguments which limit intel_idle C-state support
+ *
+ * That's perfectly fine. HPET is a piece of hardware designed by committee
+ * and the only reason why it is still in use on modern systems is the
+ * fact that it is impossible to reliably query TSC and CPU frequency via
+ * CPUID or firmware.
+ *
+ * If HPET is functional it is useful for calibrating TSC, but this can be
+ * done via PMTIMER as well which seems to be the last remaining timer on
+ * X86/INTEL platforms that has not been completely wrecked by feature
+ * creep.
+ *
+ * In theory HPET support should be removed altogether, but there are older
+ * systems out there which depend on it because TSC and APIC timer are
+ * dysfunctional in deeper C-states.
+ *
+ * It's only 20 years now that hardware people have been asked to provide
+ * reliable and discoverable facilities which can be used for timekeeping
+ * and per CPU timer interrupts.
+ *
+ * The probability that this problem is going to be solved in the
+ * foreseeable future is close to zero, so the kernel has to be cluttered
+ * with heuristics to keep up with the ever growing amount of hardware and
+ * firmware trainwrecks. Hopefully some day hardware people will understand
+ * that the approach of "This can be fixed in software" is not sustainable.
+ * Hope dies last...
+ */
+static bool __init hpet_is_pc10_damaged(void)
+{
+       unsigned long long pcfg;
+
+       /* Check whether PC10 substates are supported */
+       if (!mwait_pc10_supported())
+               return false;
+
+       /* Check whether PC10 is enabled in PKG C-state limit */
+       rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, pcfg);
+       if ((pcfg & 0xF) < 8)
+               return false;
+
+       if (hpet_force_user) {
+               pr_warn("HPET force enabled via command line, but dysfunctional in PC10.\n");
+               return false;
+       }
+
+       pr_info("HPET dysfunctional in PC10. Force disabled.\n");
+       boot_hpet_disable = true;
+       return true;
+}
+
 /**
  * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
  */
@@ -929,6 +1007,9 @@ int __init hpet_enable(void)
        if (!is_hpet_capable())
                return 0;
 
+       if (hpet_is_pc10_damaged())
+               return 0;
+
        hpet_set_mapping();
        if (!hpet_virt_address)
                return 0;
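The new hpet_is_pc10_damaged() check reads MSR_PKG_CST_CONFIG_CONTROL and treats a package C-state limit field of 8 or more as "PC10 reachable". A rough user-space counterpart of the same test, assuming the msr driver is loaded and that 0xE2 is the MSR address on the target CPU (verify against the SDM; this is a sketch, not the kernel code path):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_PKG_CST_CONFIG_CONTROL 0xe2  /* assumption: confirm in the SDM */

int main(void)
{
	uint64_t pcfg;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);  /* needs root + msr module */

	if (fd < 0 || pread(fd, &pcfg, sizeof(pcfg),
			    MSR_PKG_CST_CONFIG_CONTROL) != sizeof(pcfg)) {
		perror("rdmsr");
		return 1;
	}

	/* Same test as the patch: limit field in bits [3:0]; a value >= 8
	 * allows the package to reach PC10, where HPET stops counting. */
	printf("PKG C-state limit: %llu -> PC10 %s\n",
	       (unsigned long long)(pcfg & 0xf),
	       (pcfg & 0xf) >= 8 ? "possible" : "not reachable");
	close(fd);
	return 0;
}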
index 9f90f46..bf1033a 100644 (file)
@@ -130,6 +130,8 @@ static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
                } else {
                        ret = ES_VMM_ERROR;
                }
+       } else if (ghcb->save.sw_exit_info_1 & 0xffffffff) {
+               ret = ES_VMM_ERROR;
        } else {
                ret = ES_OK;
        }
index 3d41a09..5debe4a 100644 (file)
@@ -113,7 +113,7 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
                                 false /* no mapping of GSI to PIRQ */);
 }
 
-#ifdef CONFIG_XEN_DOM0
+#ifdef CONFIG_XEN_PV_DOM0
 static int xen_register_gsi(u32 gsi, int triggering, int polarity)
 {
        int rc, irq;
@@ -261,7 +261,7 @@ error:
        return irq;
 }
 
-#ifdef CONFIG_XEN_DOM0
+#ifdef CONFIG_XEN_PV_DOM0
 static bool __read_mostly pci_seg_supported = true;
 
 static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
@@ -375,10 +375,10 @@ static void xen_initdom_restore_msi_irqs(struct pci_dev *dev)
                WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret);
        }
 }
-#else /* CONFIG_XEN_DOM0 */
+#else /* CONFIG_XEN_PV_DOM0 */
 #define xen_initdom_setup_msi_irqs     NULL
 #define xen_initdom_restore_msi_irqs   NULL
-#endif /* !CONFIG_XEN_DOM0 */
+#endif /* !CONFIG_XEN_PV_DOM0 */
 
 static void xen_teardown_msi_irqs(struct pci_dev *dev)
 {
@@ -555,7 +555,7 @@ int __init pci_xen_hvm_init(void)
        return 0;
 }
 
-#ifdef CONFIG_XEN_DOM0
+#ifdef CONFIG_XEN_PV_DOM0
 int __init pci_xen_initial_domain(void)
 {
        int irq;
@@ -583,6 +583,9 @@ int __init pci_xen_initial_domain(void)
        }
        return 0;
 }
+#endif
+
+#ifdef CONFIG_XEN_DOM0
 
 struct xen_device_domain_owner {
        domid_t domain;
@@ -656,4 +659,4 @@ int xen_unregister_device_domain_owner(struct pci_dev *dev)
        return 0;
 }
 EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
-#endif
+#endif /* CONFIG_XEN_DOM0 */
index ee2beda..1d4a00e 100644 (file)
@@ -274,7 +274,7 @@ static struct olpc_ec_driver ec_xo1_driver = {
 
 static struct olpc_ec_driver ec_xo1_5_driver = {
        .ec_cmd = olpc_xo1_ec_cmd,
-#ifdef CONFIG_OLPC_XO1_5_SCI
+#ifdef CONFIG_OLPC_XO15_SCI
        /*
         * XO-1.5 EC wakeups are available when olpc-xo15-sci driver is
         * compiled in
index 9ac7457..ed0442e 100644 (file)
 /*
  * PVH variables.
  *
- * pvh_bootparams and pvh_start_info need to live in the data segment since
+ * pvh_bootparams and pvh_start_info need to live in a data segment since
  * they are used after startup_{32|64}, which clear .bss, are invoked.
  */
-struct boot_params pvh_bootparams __section(".data");
-struct hvm_start_info pvh_start_info __section(".data");
+struct boot_params __initdata pvh_bootparams;
+struct hvm_start_info __initdata pvh_start_info;
 
-unsigned int pvh_start_info_sz = sizeof(pvh_start_info);
+const unsigned int __initconst pvh_start_info_sz = sizeof(pvh_start_info);
 
-static u64 pvh_get_root_pointer(void)
+static u64 __init pvh_get_root_pointer(void)
 {
        return pvh_start_info.rsdp_paddr;
 }
@@ -107,7 +107,7 @@ void __init __weak xen_pvh_init(struct boot_params *boot_params)
        BUG();
 }
 
-static void hypervisor_specific_init(bool xen_guest)
+static void __init hypervisor_specific_init(bool xen_guest)
 {
        if (xen_guest)
                xen_pvh_init(&pvh_bootparams);
index afc1da6..6bcd3d8 100644 (file)
@@ -43,13 +43,9 @@ config XEN_PV_SMP
        def_bool y
        depends on XEN_PV && SMP
 
-config XEN_DOM0
-       bool "Xen PV Dom0 support"
-       default y
-       depends on XEN_PV && PCI_XEN && SWIOTLB_XEN
-       depends on X86_IO_APIC && ACPI && PCI
-       help
-         Support running as a Xen PV Dom0 guest.
+config XEN_PV_DOM0
+       def_bool y
+       depends on XEN_PV && XEN_DOM0
 
 config XEN_PVHVM
        def_bool y
@@ -86,3 +82,12 @@ config XEN_PVH
        def_bool n
        help
          Support for running as a Xen PVH guest.
+
+config XEN_DOM0
+       bool "Xen Dom0 support"
+       default XEN_PV
+       depends on (XEN_PV && SWIOTLB_XEN) || (XEN_PVH && X86_64)
+       depends on X86_IO_APIC && ACPI && PCI
+       select X86_X2APIC if XEN_PVH && X86_64
+       help
+         Support running as a Xen Dom0 guest.
index 40b5779..4953260 100644 (file)
@@ -45,7 +45,7 @@ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
 
 obj-$(CONFIG_XEN_DEBUG_FS)     += debugfs.o
 
-obj-$(CONFIG_XEN_DOM0)         += vga.o
+obj-$(CONFIG_XEN_PV_DOM0)      += vga.o
 
 obj-$(CONFIG_SWIOTLB_XEN)      += pci-swiotlb-xen.o
 
index c79bd0a..95d9703 100644 (file)
@@ -3,6 +3,7 @@
 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
 #include <linux/memblock.h>
 #endif
+#include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/kexec.h>
 #include <linux/slab.h>
 
 #include <xen/xen.h>
 #include <xen/features.h>
+#include <xen/interface/sched.h>
+#include <xen/interface/version.h>
 #include <xen/page.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/cpu.h>
 #include <asm/e820/api.h> 
+#include <asm/setup.h>
 
 #include "xen-ops.h"
 #include "smp.h"
@@ -52,9 +56,6 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
 EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
 
-enum xen_domain_type xen_domain_type = XEN_NATIVE;
-EXPORT_SYMBOL_GPL(xen_domain_type);
-
 unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
 EXPORT_SYMBOL(machine_to_phys_mapping);
 unsigned long  machine_to_phys_nr;
@@ -69,10 +70,12 @@ __read_mostly int xen_have_vector_callback;
 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
 
 /*
- * NB: needs to live in .data because it's used by xen_prepare_pvh which runs
- * before clearing the bss.
+ * NB: These need to live in .data or the like because they're used by
+ * xen_prepare_pvh() which runs before clearing the bss.
  */
-uint32_t xen_start_flags __section(".data") = 0;
+enum xen_domain_type __ro_after_init xen_domain_type = XEN_NATIVE;
+EXPORT_SYMBOL_GPL(xen_domain_type);
+uint32_t __ro_after_init xen_start_flags;
 EXPORT_SYMBOL(xen_start_flags);
 
 /*
@@ -258,6 +261,45 @@ int xen_vcpu_setup(int cpu)
        return ((per_cpu(xen_vcpu, cpu) == NULL) ? -ENODEV : 0);
 }
 
+void __init xen_banner(void)
+{
+       unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
+       struct xen_extraversion extra;
+
+       HYPERVISOR_xen_version(XENVER_extraversion, &extra);
+
+       pr_info("Booting kernel on %s\n", pv_info.name);
+       pr_info("Xen version: %u.%u%s%s\n",
+               version >> 16, version & 0xffff, extra.extraversion,
+               xen_feature(XENFEAT_mmu_pt_update_preserve_ad)
+               ? " (preserve-AD)" : "");
+}
+
+/* Check if running on Xen version (major, minor) or later */
+bool xen_running_on_version_or_later(unsigned int major, unsigned int minor)
+{
+       unsigned int version;
+
+       if (!xen_domain())
+               return false;
+
+       version = HYPERVISOR_xen_version(XENVER_version, NULL);
+       if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
+               ((version >> 16) > major))
+               return true;
+       return false;
+}
+
+void __init xen_add_preferred_consoles(void)
+{
+       add_preferred_console("xenboot", 0, NULL);
+       if (!boot_params.screen_info.orig_video_isVGA)
+               add_preferred_console("tty", 0, NULL);
+       add_preferred_console("hvc", 0, NULL);
+       if (boot_params.screen_info.orig_video_isVGA)
+               add_preferred_console("tty", 0, NULL);
+}
+
 void xen_reboot(int reason)
 {
        struct sched_shutdown r = { .reason = reason };
index 6e0d075..a7b7d67 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/mm.h>
 #include <linux/page-flags.h>
 #include <linux/highmem.h>
-#include <linux/console.h>
 #include <linux/pci.h>
 #include <linux/gfp.h>
 #include <linux/edd.h>
@@ -109,17 +108,6 @@ struct tls_descs {
  */
 static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);
 
-static void __init xen_banner(void)
-{
-       unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
-       struct xen_extraversion extra;
-       HYPERVISOR_xen_version(XENVER_extraversion, &extra);
-
-       pr_info("Booting paravirtualized kernel on %s\n", pv_info.name);
-       pr_info("Xen version: %d.%d%s (preserve-AD)\n",
-               version >> 16, version & 0xffff, extra.extraversion);
-}
-
 static void __init xen_pv_init_platform(void)
 {
        populate_extra_pte(fix_to_virt(FIX_PARAVIRT_BOOTMAP));
@@ -142,22 +130,6 @@ static void __init xen_pv_guest_late_init(void)
 #endif
 }
 
-/* Check if running on Xen version (major, minor) or later */
-bool
-xen_running_on_version_or_later(unsigned int major, unsigned int minor)
-{
-       unsigned int version;
-
-       if (!xen_domain())
-               return false;
-
-       version = HYPERVISOR_xen_version(XENVER_version, NULL);
-       if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
-               ((version >> 16) > major))
-               return true;
-       return false;
-}
-
 static __read_mostly unsigned int cpuid_leaf5_ecx_val;
 static __read_mostly unsigned int cpuid_leaf5_edx_val;
 
@@ -1364,7 +1336,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
        boot_params.hdr.hardware_subarch = X86_SUBARCH_XEN;
 
        if (!xen_initial_domain()) {
-               add_preferred_console("xenboot", 0, NULL);
                if (pci_xen)
                        x86_init.pci.arch_init = pci_xen_init;
                x86_platform.set_legacy_features =
@@ -1409,11 +1380,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
 #endif
        }
 
-       if (!boot_params.screen_info.orig_video_isVGA)
-               add_preferred_console("tty", 0, NULL);
-       add_preferred_console("hvc", 0, NULL);
-       if (boot_params.screen_info.orig_video_isVGA)
-               add_preferred_console("tty", 0, NULL);
+       xen_add_preferred_consoles();
 
 #ifdef CONFIG_PCI
        /* PCI BIOS service won't work from a PV guest. */
index 0d5e34b..bcae606 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/acpi.h>
+#include <linux/export.h>
 
 #include <xen/hvc-console.h>
 
 /*
  * PVH variables.
  *
- * The variable xen_pvh needs to live in the data segment since it is used
+ * The variable xen_pvh needs to live in a data segment since it is used
  * after startup_{32|64} is invoked, which will clear the .bss segment.
  */
-bool xen_pvh __section(".data") = 0;
+bool __ro_after_init xen_pvh;
+EXPORT_SYMBOL_GPL(xen_pvh);
 
 void __init xen_pvh_init(struct boot_params *boot_params)
 {
@@ -36,6 +38,10 @@ void __init xen_pvh_init(struct boot_params *boot_params)
        pfn = __pa(hypercall_page);
        wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
 
+       if (xen_initial_domain())
+               x86_init.oem.arch_setup = xen_add_preferred_consoles;
+       x86_init.oem.banner = xen_banner;
+
        xen_efi_init(boot_params);
 }
 
index 8d75193..3359c23 100644 (file)
@@ -2398,7 +2398,7 @@ static int remap_area_pfn_pte_fn(pte_t *ptep, unsigned long addr, void *data)
 
 int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
                  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
-                 unsigned int domid, bool no_translate, struct page **pages)
+                 unsigned int domid, bool no_translate)
 {
        int err = 0;
        struct remap_data rmd;
index 8d7ec49..8bc8b72 100644 (file)
@@ -51,6 +51,7 @@ void __init xen_remap_memory(void);
 phys_addr_t __init xen_find_free_area(phys_addr_t size);
 char * __init xen_memory_setup(void);
 void __init xen_arch_setup(void);
+void xen_banner(void);
 void xen_enable_sysenter(void);
 void xen_enable_syscall(void);
 void xen_vcpu_restore(void);
@@ -109,7 +110,7 @@ static inline void xen_uninit_lock_cpu(int cpu)
 
 struct dom0_vga_console_info;
 
-#ifdef CONFIG_XEN_DOM0
+#ifdef CONFIG_XEN_PV_DOM0
 void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
 #else
 static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
@@ -118,6 +119,8 @@ static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
 }
 #endif
 
+void xen_add_preferred_consoles(void);
+
 void __init xen_init_apic(void);
 
 #ifdef CONFIG_XEN_EFI
index 7cbf68c..6fc05cb 100644 (file)
@@ -78,7 +78,7 @@
 #endif
 #define XCHAL_KIO_SIZE                 0x10000000
 
-#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
+#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_USE_OF)
 #define XCHAL_KIO_PADDR                        xtensa_get_kio_paddr()
 #ifndef __ASSEMBLY__
 extern unsigned long xtensa_kio_paddr;
index 764b54b..15051a8 100644 (file)
@@ -143,7 +143,7 @@ unsigned xtensa_get_ext_irq_no(unsigned irq)
 
 void __init init_IRQ(void)
 {
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
        irqchip_init();
 #else
 #ifdef CONFIG_HAVE_SMP
index ed18410..ee9082a 100644 (file)
@@ -63,7 +63,7 @@ extern unsigned long initrd_end;
 extern int initrd_below_start_ok;
 #endif
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
 void *dtb_start = __dtb_start;
 #endif
 
@@ -125,7 +125,7 @@ __tagtable(BP_TAG_INITRD, parse_tag_initrd);
 
 #endif /* CONFIG_BLK_DEV_INITRD */
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
 
 static int __init parse_tag_fdt(const bp_tag_t *tag)
 {
@@ -135,7 +135,7 @@ static int __init parse_tag_fdt(const bp_tag_t *tag)
 
 __tagtable(BP_TAG_FDT, parse_tag_fdt);
 
-#endif /* CONFIG_OF */
+#endif /* CONFIG_USE_OF */
 
 static int __init parse_tag_cmdline(const bp_tag_t* tag)
 {
@@ -183,7 +183,7 @@ static int __init parse_bootparam(const bp_tag_t *tag)
 }
 #endif
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
 
 #if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY
 unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
@@ -232,7 +232,7 @@ void __init early_init_devtree(void *params)
                strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
 }
 
-#endif /* CONFIG_OF */
+#endif /* CONFIG_USE_OF */
 
 /*
  * Initialize architecture. (Early stage)
@@ -253,7 +253,7 @@ void __init init_arch(bp_tag_t *bp_start)
        if (bp_start)
                parse_bootparam(bp_start);
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
        early_init_devtree(dtb_start);
 #endif
 
index 7e4d97d..38acda4 100644 (file)
@@ -101,7 +101,7 @@ void init_mmu(void)
 
 void init_kio(void)
 {
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_USE_OF)
        /*
         * Update the IO area mapping in case xtensa_kio_paddr has changed
         */
index 4f7d614..538e674 100644 (file)
@@ -51,8 +51,12 @@ void platform_power_off(void)
 
 void platform_restart(void)
 {
-       /* Flush and reset the mmu, simulate a processor reset, and
-        * jump to the reset vector. */
+       /* Try software reset first. */
+       WRITE_ONCE(*(u32 *)XTFPGA_SWRST_VADDR, 0xdead);
+
+       /* If software reset did not work, flush and reset the mmu,
+        * simulate a processor reset, and jump to the reset vector.
+        */
        cpu_reset();
        /* control never gets here */
 }
@@ -66,7 +70,7 @@ void __init platform_calibrate_ccount(void)
 
 #endif
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
 
 static void __init xtfpga_clk_setup(struct device_node *np)
 {
@@ -284,4 +288,4 @@ static int __init xtavnet_init(void)
  */
 arch_initcall(xtavnet_init);
 
-#endif /* CONFIG_OF */
+#endif /* CONFIG_USE_OF */
index cf2780c..485a258 100644 (file)
@@ -490,7 +490,6 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
        bdev = I_BDEV(inode);
        mutex_init(&bdev->bd_fsfreeze_mutex);
        spin_lock_init(&bdev->bd_size_lock);
-       bdev->bd_disk = disk;
        bdev->bd_partno = partno;
        bdev->bd_inode = inode;
        bdev->bd_stats = alloc_percpu(struct disk_stats);
@@ -498,6 +497,7 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
                iput(inode);
                return NULL;
        }
+       bdev->bd_disk = disk;
        return bdev;
 }
 
index 4b66d27..3b38d15 100644 (file)
@@ -129,6 +129,7 @@ static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(PCI_P2PDMA),
        QUEUE_FLAG_NAME(ZONE_RESETALL),
        QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
+       QUEUE_FLAG_NAME(HCTX_ACTIVE),
        QUEUE_FLAG_NAME(NOWAIT),
 };
 #undef QUEUE_FLAG_NAME
index 7b6e5e1..496e845 100644 (file)
@@ -1268,6 +1268,7 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
 
 out_destroy_part_tbl:
        xa_destroy(&disk->part_tbl);
+       disk->part0->bd_disk = NULL;
        iput(disk->part0->bd_inode);
 out_free_bdi:
        bdi_put(disk->bdi);
index 30d2db3..0d399dd 100644 (file)
@@ -17,6 +17,8 @@ source "drivers/bus/Kconfig"
 
 source "drivers/connector/Kconfig"
 
+source "drivers/firmware/Kconfig"
+
 source "drivers/gnss/Kconfig"
 
 source "drivers/mtd/Kconfig"
index 0a0a982..c0e77c1 100644 (file)
@@ -36,7 +36,7 @@ struct acpi_gtdt_descriptor {
 
 static struct acpi_gtdt_descriptor acpi_gtdt_desc __initdata;
 
-static inline void *next_platform_timer(void *platform_timer)
+static inline __init void *next_platform_timer(void *platform_timer)
 {
        struct acpi_gtdt_header *gh = platform_timer;
 
index 64b2f3d..7f76fee 100644 (file)
@@ -2,4 +2,4 @@
 obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE)  += test_async_driver_probe.o
 
 obj-$(CONFIG_DRIVER_PE_KUNIT_TEST) += property-entry-test.o
-CFLAGS_REMOVE_property-entry-test.o += -fplugin-arg-structleak_plugin-byref -fplugin-arg-structleak_plugin-byref-all
+CFLAGS_property-entry-test.o += $(DISABLE_STRUCTLEAK_PLUGIN)
index 0bebd5a..cda7d71 100644 (file)
@@ -203,10 +203,7 @@ config INTEL_STRATIX10_RSU
          Say Y here if you want Intel RSU support.
 
 config QCOM_SCM
-       tristate "Qcom SCM driver"
-       depends on ARCH_QCOM || COMPILE_TEST
-       depends on HAVE_ARM_SMCCC
-       select RESET_CONTROLLER
+       tristate
 
 config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
        bool "Qualcomm download mode enabled by default"
index d356e32..269437b 100644 (file)
@@ -1087,6 +1087,7 @@ struct amdgpu_device {
 
        bool                            no_hw_access;
        struct pci_saved_state          *pci_state;
+       pci_channel_state_t             pci_channel_state;
 
        struct amdgpu_reset_control     *reset_cntl;
 };
index 2d6b2d7..054c1a2 100644 (file)
@@ -563,6 +563,7 @@ kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
 
        dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
        sg_free_table(ttm->sg);
+       kfree(ttm->sg);
        ttm->sg = NULL;
 }
 
index ab3794c..af9bdf1 100644 (file)
@@ -2394,10 +2394,6 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
        if (r)
                goto init_failed;
 
-       r = amdgpu_amdkfd_resume_iommu(adev);
-       if (r)
-               goto init_failed;
-
        r = amdgpu_device_ip_hw_init_phase1(adev);
        if (r)
                goto init_failed;
@@ -2436,6 +2432,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
        if (!adev->gmc.xgmi.pending_reset)
                amdgpu_amdkfd_device_init(adev);
 
+       r = amdgpu_amdkfd_resume_iommu(adev);
+       if (r)
+               goto init_failed;
+
        amdgpu_fru_get_product_info(adev);
 
 init_failed:
@@ -5399,6 +5399,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
                return PCI_ERS_RESULT_DISCONNECT;
        }
 
+       adev->pci_channel_state = state;
+
        switch (state) {
        case pci_channel_io_normal:
                return PCI_ERS_RESULT_CAN_RECOVER;
@@ -5541,6 +5543,10 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
 
        DRM_INFO("PCI error: resume callback!!\n");
 
+       /* Only continue execution for the case of pci_channel_io_frozen */
+       if (adev->pci_channel_state != pci_channel_io_frozen)
+               return;
+
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
 
index e7f06bd..1916ec8 100644 (file)
@@ -31,6 +31,8 @@
 /* delay 0.1 second to enable gfx off feature */
 #define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)
 
+#define GFX_OFF_NO_DELAY 0
+
 /*
  * GPU GFX IP block helpers function.
  */
@@ -558,6 +560,8 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
 
 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 {
+       unsigned long delay = GFX_OFF_DELAY_ENABLE;
+
        if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
                return;
 
@@ -573,8 +577,14 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 
                adev->gfx.gfx_off_req_count--;
 
-               if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state)
-                       schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
+               if (adev->gfx.gfx_off_req_count == 0 &&
+                   !adev->gfx.gfx_off_state) {
+                       /* If going to s2idle, no need to wait */
+                       if (adev->in_s0ix)
+                               delay = GFX_OFF_NO_DELAY;
+                       schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+                                             delay);
+               }
        } else {
                if (adev->gfx.gfx_off_req_count == 0) {
                        cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
index c2a4d92..4a41623 100644 (file)
@@ -1085,18 +1085,12 @@ static int kfd_resume(struct kfd_dev *kfd)
        int err = 0;
 
        err = kfd->dqm->ops.start(kfd->dqm);
-       if (err) {
+       if (err)
                dev_err(kfd_device,
                        "Error starting queue manager for device %x:%x\n",
                        kfd->pdev->vendor, kfd->pdev->device);
-               goto dqm_start_error;
-       }
 
        return err;
-
-dqm_start_error:
-       kfd_iommu_suspend(kfd);
-       return err;
 }
 
 static inline void kfd_queue_work(struct workqueue_struct *wq,
index 7dffc04..127667e 100644 (file)
@@ -25,6 +25,8 @@ config DRM_AMD_DC_HDCP
 
 config DRM_AMD_DC_SI
        bool "AMD DC support for Southern Islands ASICs"
+       depends on DRM_AMDGPU_SI
+       depends on DRM_AMD_DC
        default n
        help
          Choose this option to enable new AMD DC support for SI asics
index 05eaec0..6d655e1 100644 (file)
@@ -1306,12 +1306,6 @@ static void override_training_settings(
 {
        uint32_t lane;
 
-       /* Override link settings */
-       if (link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
-               lt_settings->link_settings.link_rate = link->preferred_link_setting.link_rate;
-       if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN)
-               lt_settings->link_settings.lane_count = link->preferred_link_setting.lane_count;
-
        /* Override link spread */
        if (!link->dp_ss_off && overrides->downspread != NULL)
                lt_settings->link_settings.link_spread = *overrides->downspread ?
index d8b2261..c337588 100644 (file)
@@ -118,6 +118,7 @@ struct dcn10_link_enc_registers {
        uint32_t RDPCSTX_PHY_CNTL4;
        uint32_t RDPCSTX_PHY_CNTL5;
        uint32_t RDPCSTX_PHY_CNTL6;
+       uint32_t RDPCSPIPE_PHY_CNTL6;
        uint32_t RDPCSTX_PHY_CNTL7;
        uint32_t RDPCSTX_PHY_CNTL8;
        uint32_t RDPCSTX_PHY_CNTL9;
index 90127c1..b089244 100644 (file)
@@ -37,6 +37,7 @@
 
 #include "link_enc_cfg.h"
 #include "dc_dmub_srv.h"
+#include "dal_asic_id.h"
 
 #define CTX \
        enc10->base.ctx
 #define AUX_REG_WRITE(reg_name, val) \
                        dm_write_reg(CTX, AUX_REG(reg_name), val)
 
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+
 void dcn31_link_encoder_set_dio_phy_mux(
        struct link_encoder *enc,
        enum encoder_type_select sel,
@@ -215,8 +220,8 @@ static const struct link_encoder_funcs dcn31_link_enc_funcs = {
        .fec_is_active = enc2_fec_is_active,
        .get_dig_frontend = dcn10_get_dig_frontend,
        .get_dig_mode = dcn10_get_dig_mode,
-       .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
-       .get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
+       .is_in_alt_mode = dcn31_link_encoder_is_in_alt_mode,
+       .get_max_link_cap = dcn31_link_encoder_get_max_link_cap,
        .set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
 };
 
@@ -404,3 +409,60 @@ void dcn31_link_encoder_disable_output(
        }
 }
 
+bool dcn31_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+       struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+       uint32_t dp_alt_mode_disable;
+       bool is_usb_c_alt_mode = false;
+
+       if (enc->features.flags.bits.DP_IS_USB_C) {
+               if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
+                       // [Note] no need to check hw_internal_rev once phy mux selection is ready
+                       REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+               } else {
+               /*
+                * B0 phys use a new set of registers to check whether alt mode is disabled.
+                * if value == 1 alt mode is disabled, otherwise it is enabled.
+                */
+                       if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A)
+                                       || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B)
+                                       || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
+                               REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+                       } else {
+                       // [Note] need to change TRANSMITTER_UNIPHY_C/D to F/G once phy mux selection is ready
+                               REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+                       }
+               }
+
+               is_usb_c_alt_mode = (dp_alt_mode_disable == 0);
+       }
+
+       return is_usb_c_alt_mode;
+}
+
+void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc,
+                                                                                struct dc_link_settings *link_settings)
+{
+       struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+       uint32_t is_in_usb_c_dp4_mode = 0;
+
+       dcn10_link_encoder_get_max_link_cap(enc, link_settings);
+
+       /* in usb c dp2 mode, max lane count is 2 */
+       if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
+               if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
+                       // [Note] no need to check hw_internal_rev once phy mux selection is ready
+                       REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+               } else {
+                       if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A)
+                                       || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B)
+                                       || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
+                               REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+                       } else {
+                               REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+                       }
+               }
+               if (!is_in_usb_c_dp4_mode)
+                       link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
+       }
+}
index 32d1463..3454f1e 100644 (file)
@@ -69,6 +69,7 @@
        SRI(RDPCSTX_PHY_CNTL4, RDPCSTX, id), \
        SRI(RDPCSTX_PHY_CNTL5, RDPCSTX, id), \
        SRI(RDPCSTX_PHY_CNTL6, RDPCSTX, id), \
+       SRI(RDPCSPIPE_PHY_CNTL6, RDPCSPIPE, id), \
        SRI(RDPCSTX_PHY_CNTL7, RDPCSTX, id), \
        SRI(RDPCSTX_PHY_CNTL8, RDPCSTX, id), \
        SRI(RDPCSTX_PHY_CNTL9, RDPCSTX, id), \
        LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_MPLL_EN, mask_sh),\
        LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_MPLL_EN, mask_sh),\
        LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
-       LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
+       LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\
+       LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
+       LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\
        LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_QUOT, mask_sh),\
        LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_DEN, mask_sh),\
        LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL8, RDPCS_PHY_DP_MPLLB_SSC_PEAK, mask_sh),\
@@ -243,4 +246,13 @@ void dcn31_link_encoder_disable_output(
        struct link_encoder *enc,
        enum signal_type signal);
 
+/*
+ * Check whether USB-C DP Alt mode is disabled
+ */
+bool dcn31_link_encoder_is_in_alt_mode(
+       struct link_encoder *enc);
+
+void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc,
+       struct dc_link_settings *link_settings);
+
 #endif /* __DC_LINK_ENCODER__DCN31_H__ */
index a7702d3..0006bba 100644 (file)
@@ -928,7 +928,7 @@ static const struct dc_debug_options debug_defaults_drv = {
        .disable_dcc = DCC_ENABLE,
        .vsr_support = true,
        .performance_trace = false,
-       .max_downscale_src_width = 7680,/*upto 8K*/
+       .max_downscale_src_width = 3840,/* up to 4K */
        .disable_pplib_wm_range = false,
        .scl_reset_length10 = true,
        .sanity_checks = false,
@@ -1284,6 +1284,12 @@ static struct stream_encoder *dcn31_stream_encoder_create(
        if (!enc1 || !vpg || !afmt)
                return NULL;
 
+       if (ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+                       ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+               if ((eng_id == ENGINE_ID_DIGC) || (eng_id == ENGINE_ID_DIGD))
+                       eng_id = eng_id + 3; // For B0 only. C->F, D->G.
+       }
+
        dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
                                        eng_id, vpg, afmt,
                                        &stream_enc_regs[eng_id],
index 381c17c..5adc471 100644 (file)
@@ -227,7 +227,7 @@ enum {
 #define FAMILY_YELLOW_CARP                     146
 
 #define YELLOW_CARP_A0 0x01
-#define YELLOW_CARP_B0 0x02            // TODO: DCN31 - update with correct B0 ID
+#define YELLOW_CARP_B0 0x1A
 #define YELLOW_CARP_UNKNOWN 0xFF
 
 #ifndef ASICREV_IS_YELLOW_CARP
index 92caf84..01a5655 100644 (file)
 #define ixDPCSSYS_CR4_RAWLANEX_DIG_PCS_XF_RX_OVRD_OUT_2                                                0xe0c7
 #define ixDPCSSYS_CR4_RAWLANEX_DIG_PCS_XF_TX_OVRD_IN_2                                                 0xe0c8
 
+//RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT                                            0x10
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT                                        0x11
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT                                    0x12
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK                                              0x00010000L
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK                                          0x00020000L
+#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK                                      0x00040000L
+
+//RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT                                            0x10
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT                                        0x11
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT                                    0x12
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK                                              0x00010000L
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK                                          0x00020000L
+#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK                                      0x00040000L
+
+//[Note] Hack. RDPCSPIPE only has 2 instances.
+#define regRDPCSPIPE0_RDPCSPIPE_PHY_CNTL6                                                              0x2d73
+#define regRDPCSPIPE0_RDPCSPIPE_PHY_CNTL6_BASE_IDX                                                     2
+#define regRDPCSPIPE1_RDPCSPIPE_PHY_CNTL6                                                              0x2e4b
+#define regRDPCSPIPE1_RDPCSPIPE_PHY_CNTL6_BASE_IDX                                                     2
+#define regRDPCSPIPE2_RDPCSPIPE_PHY_CNTL6                                                              0x2d73
+#define regRDPCSPIPE2_RDPCSPIPE_PHY_CNTL6_BASE_IDX                                                     2
+#define regRDPCSPIPE3_RDPCSPIPE_PHY_CNTL6                                                              0x2e4b
+#define regRDPCSPIPE3_RDPCSPIPE_PHY_CNTL6_BASE_IDX                                                     2
+#define regRDPCSPIPE4_RDPCSPIPE_PHY_CNTL6                                                              0x2d73
+#define regRDPCSPIPE4_RDPCSPIPE_PHY_CNTL6_BASE_IDX                                                     2
 
 #endif
index 43ec7fc..a3eae3f 100644 (file)
@@ -1577,8 +1577,14 @@ static void gen11_dsi_sync_state(struct intel_encoder *encoder,
                                 const struct intel_crtc_state *crtc_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
-       enum pipe pipe = intel_crtc->pipe;
+       struct intel_crtc *intel_crtc;
+       enum pipe pipe;
+
+       if (!crtc_state)
+               return;
+
+       intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+       pipe = intel_crtc->pipe;
 
        /* wa verify 1409054076:icl,jsl,ehl */
        if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B &&
index 5322375..4e0f96b 100644 (file)
@@ -1308,8 +1308,9 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
                else
                        aud_freq = aud_freq_init;
 
-               /* use BIOS provided value for TGL unless it is a known bad value */
-               if (IS_TIGERLAKE(dev_priv) && aud_freq_init != AUD_FREQ_TGL_BROKEN)
+               /* use BIOS provided value for TGL and RKL unless it is a known bad value */
+               if ((IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv)) &&
+                   aud_freq_init != AUD_FREQ_TGL_BROKEN)
                        aud_freq = aud_freq_init;
 
                drm_dbg_kms(&dev_priv->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n",
index e86e6ed..fd71346 100644 (file)
@@ -451,13 +451,23 @@ parse_lfp_backlight(struct drm_i915_private *i915,
        }
 
        i915->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
-       if (bdb->version >= 191 &&
-           get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
-               const struct lfp_backlight_control_method *method;
+       if (bdb->version >= 191) {
+               size_t exp_size;
 
-               method = &backlight_data->backlight_control[panel_type];
-               i915->vbt.backlight.type = method->type;
-               i915->vbt.backlight.controller = method->controller;
+               if (bdb->version >= 236)
+                       exp_size = sizeof(struct bdb_lfp_backlight_data);
+               else if (bdb->version >= 234)
+                       exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234;
+               else
+                       exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191;
+
+               if (get_blocksize(backlight_data) >= exp_size) {
+                       const struct lfp_backlight_control_method *method;
+
+                       method = &backlight_data->backlight_control[panel_type];
+                       i915->vbt.backlight.type = method->type;
+                       i915->vbt.backlight.controller = method->controller;
+               }
        }
 
        i915->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
index 9903a78..bd18432 100644 (file)
@@ -3807,7 +3807,13 @@ void hsw_ddi_get_config(struct intel_encoder *encoder,
 static void intel_ddi_sync_state(struct intel_encoder *encoder,
                                 const struct intel_crtc_state *crtc_state)
 {
-       if (intel_crtc_has_dp_encoder(crtc_state))
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+       enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+       if (intel_phy_is_tc(i915, phy))
+               intel_tc_port_sanitize(enc_to_dig_port(encoder));
+
+       if (crtc_state && intel_crtc_has_dp_encoder(crtc_state))
                intel_dp_sync_state(encoder, crtc_state);
 }
 
index 134a6ac..17f44ff 100644 (file)
@@ -13082,18 +13082,16 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
        readout_plane_state(dev_priv);
 
        for_each_intel_encoder(dev, encoder) {
+               struct intel_crtc_state *crtc_state = NULL;
+
                pipe = 0;
 
                if (encoder->get_hw_state(encoder, &pipe)) {
-                       struct intel_crtc_state *crtc_state;
-
                        crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
                        crtc_state = to_intel_crtc_state(crtc->base.state);
 
                        encoder->base.crtc = &crtc->base;
                        intel_encoder_get_config(encoder, crtc_state);
-                       if (encoder->sync_state)
-                               encoder->sync_state(encoder, crtc_state);
 
                        /* read out to slave crtc as well for bigjoiner */
                        if (crtc_state->bigjoiner) {
@@ -13108,6 +13106,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                        encoder->base.crtc = NULL;
                }
 
+               if (encoder->sync_state)
+                       encoder->sync_state(encoder, crtc_state);
+
                drm_dbg_kms(&dev_priv->drm,
                            "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
                            encoder->base.base.id, encoder->base.name,
@@ -13390,17 +13391,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
        intel_modeset_readout_hw_state(dev);
 
        /* HW state is read out, now we need to sanitize this mess. */
-
-       /* Sanitize the TypeC port mode upfront, encoders depend on this */
-       for_each_intel_encoder(dev, encoder) {
-               enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-
-               /* We need to sanitize only the MST primary port. */
-               if (encoder->type != INTEL_OUTPUT_DP_MST &&
-                   intel_phy_is_tc(dev_priv, phy))
-                       intel_tc_port_sanitize(enc_to_dig_port(encoder));
-       }
-
        get_encoder_power_domains(dev_priv);
 
        if (HAS_PCH_IBX(dev_priv))
index 330077c..a2108a8 100644 (file)
@@ -814,6 +814,11 @@ struct lfp_brightness_level {
        u16 reserved;
 } __packed;
 
+#define EXP_BDB_LFP_BL_DATA_SIZE_REV_191 \
+       offsetof(struct bdb_lfp_backlight_data, brightness_level)
+#define EXP_BDB_LFP_BL_DATA_SIZE_REV_234 \
+       offsetof(struct bdb_lfp_backlight_data, brightness_precision_bits)
+
 struct bdb_lfp_backlight_data {
        u8 entry_size;
        struct lfp_backlight_data_entry data[16];
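The EXP_BDB_LFP_BL_DATA_SIZE_REV_* macros above size each older BDB revision with offsetof(): everything up to the first field that the revision does not yet contain. A small standalone illustration of that sizing trick (the field names are made up, not the VBT layout):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical versioned blob: rev A ends before 'added_in_rev_b',
 * rev B ends before 'added_in_rev_c'. */
struct versioned_blob {
	uint8_t  entry_size;
	uint16_t legacy[4];
	uint32_t added_in_rev_b;
	uint64_t added_in_rev_c;
};

#define BLOB_SIZE_REV_A offsetof(struct versioned_blob, added_in_rev_b)
#define BLOB_SIZE_REV_B offsetof(struct versioned_blob, added_in_rev_c)

int main(void)
{
	/* A parser accepts a block only if it is at least as large as the
	 * layout its advertised revision is expected to provide. */
	printf("rev A expects >= %zu bytes, rev B expects >= %zu, latest >= %zu\n",
	       BLOB_SIZE_REV_A, BLOB_SIZE_REV_B, sizeof(struct versioned_blob));
	return 0;
}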
index e382b7f..5ab136f 100644 (file)
@@ -118,7 +118,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
        intel_wakeref_t wakeref = 0;
        unsigned long count = 0;
        unsigned long scanned = 0;
-       int err;
+       int err = 0;
 
        /* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */
        bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);
@@ -242,12 +242,15 @@ skip:
                list_splice_tail(&still_in_list, phase->list);
                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
                if (err)
-                       return err;
+                       break;
        }
 
        if (shrink & I915_SHRINK_BOUND)
                intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 
+       if (err)
+               return err;
+
        if (nr_scanned)
                *nr_scanned += scanned;
        return count;
index 664970f..4037030 100644 (file)
@@ -8193,6 +8193,11 @@ enum {
 #define  HSW_SPR_STRETCH_MAX_X1                REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 3)
 #define  HSW_FBCQ_DIS                  (1 << 22)
 #define  BDW_DPRS_MASK_VBLANK_SRD      (1 << 0)
+#define  SKL_PLANE1_STRETCH_MAX_MASK   REG_GENMASK(1, 0)
+#define  SKL_PLANE1_STRETCH_MAX_X8     REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 0)
+#define  SKL_PLANE1_STRETCH_MAX_X4     REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 1)
+#define  SKL_PLANE1_STRETCH_MAX_X2     REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 2)
+#define  SKL_PLANE1_STRETCH_MAX_X1     REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 3)
 #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
 
 #define _CHICKEN_TRANS_A       0x420c0
index 65bc370..a725792 100644 (file)
@@ -76,6 +76,8 @@ struct intel_wm_config {
 
 static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
 {
+       enum pipe pipe;
+
        if (HAS_LLC(dev_priv)) {
                /*
                 * WaCompressedResourceDisplayNewHashMode:skl,kbl
@@ -89,6 +91,16 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
                           SKL_DE_COMPRESSED_HASH_MODE);
        }
 
+       for_each_pipe(dev_priv, pipe) {
+               /*
+                * "Plane N stretch max must be programmed to 11b (x1)
+                *  when Async flips are enabled on that plane."
+                */
+               if (!IS_GEMINILAKE(dev_priv) && intel_vtd_active())
+                       intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
+                                        SKL_PLANE1_STRETCH_MAX_MASK, SKL_PLANE1_STRETCH_MAX_X1);
+       }
+
        /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
        intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
                   intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
index 1c2f479..12ce669 100644 (file)
@@ -172,10 +172,10 @@ static int kmb_setup_mode_config(struct drm_device *drm)
        ret = drmm_mode_config_init(drm);
        if (ret)
                return ret;
-       drm->mode_config.min_width = KMB_MIN_WIDTH;
-       drm->mode_config.min_height = KMB_MIN_HEIGHT;
-       drm->mode_config.max_width = KMB_MAX_WIDTH;
-       drm->mode_config.max_height = KMB_MAX_HEIGHT;
+       drm->mode_config.min_width = KMB_FB_MIN_WIDTH;
+       drm->mode_config.min_height = KMB_FB_MIN_HEIGHT;
+       drm->mode_config.max_width = KMB_FB_MAX_WIDTH;
+       drm->mode_config.max_height = KMB_FB_MAX_HEIGHT;
        drm->mode_config.funcs = &kmb_mode_config_funcs;
 
        ret = kmb_setup_crtc(drm);
index ebbaa5f..69a62e2 100644 (file)
 #define DRIVER_MAJOR                   1
 #define DRIVER_MINOR                   1
 
+#define KMB_FB_MAX_WIDTH               1920
+#define KMB_FB_MAX_HEIGHT              1080
+#define KMB_FB_MIN_WIDTH               1
+#define KMB_FB_MIN_HEIGHT              1
+
 #define KMB_LCD_DEFAULT_CLK            200000000
 #define KMB_SYS_CLK_MHZ                        500
 
index ecee678..06b0c42 100644 (file)
@@ -94,9 +94,10 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
        if (ret)
                return ret;
 
-       if (new_plane_state->crtc_w > KMB_MAX_WIDTH || new_plane_state->crtc_h > KMB_MAX_HEIGHT)
-               return -EINVAL;
-       if (new_plane_state->crtc_w < KMB_MIN_WIDTH || new_plane_state->crtc_h < KMB_MIN_HEIGHT)
+       if (new_plane_state->crtc_w > KMB_FB_MAX_WIDTH ||
+           new_plane_state->crtc_h > KMB_FB_MAX_HEIGHT ||
+           new_plane_state->crtc_w < KMB_FB_MIN_WIDTH ||
+           new_plane_state->crtc_h < KMB_FB_MIN_HEIGHT)
                return -EINVAL;
        can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
        crtc_state =
@@ -277,6 +278,44 @@ static void config_csc(struct kmb_drm_private *kmb, int plane_id)
        kmb_write_lcd(kmb, LCD_LAYERn_CSC_OFF3(plane_id), csc_coef_lcd[11]);
 }
 
+static void kmb_plane_set_alpha(struct kmb_drm_private *kmb,
+                               const struct drm_plane_state *state,
+                               unsigned char plane_id,
+                               unsigned int *val)
+{
+       u16 plane_alpha = state->alpha;
+       u16 pixel_blend_mode = state->pixel_blend_mode;
+       int has_alpha = state->fb->format->has_alpha;
+
+       if (plane_alpha != DRM_BLEND_ALPHA_OPAQUE)
+               *val |= LCD_LAYER_ALPHA_STATIC;
+
+       if (has_alpha) {
+               switch (pixel_blend_mode) {
+               case DRM_MODE_BLEND_PIXEL_NONE:
+                       break;
+               case DRM_MODE_BLEND_PREMULTI:
+                       *val |= LCD_LAYER_ALPHA_EMBED | LCD_LAYER_ALPHA_PREMULT;
+                       break;
+               case DRM_MODE_BLEND_COVERAGE:
+                       *val |= LCD_LAYER_ALPHA_EMBED;
+                       break;
+               default:
+                       DRM_DEBUG("Missing pixel blend mode case (%s == %ld)\n",
+                                 __stringify(pixel_blend_mode),
+                                 (long)pixel_blend_mode);
+                       break;
+               }
+       }
+
+       if (plane_alpha == DRM_BLEND_ALPHA_OPAQUE && !has_alpha) {
+               *val &= LCD_LAYER_ALPHA_DISABLED;
+               return;
+       }
+
+       kmb_write_lcd(kmb, LCD_LAYERn_ALPHA(plane_id), plane_alpha);
+}
+
 static void kmb_plane_atomic_update(struct drm_plane *plane,
                                    struct drm_atomic_state *state)
 {
@@ -303,11 +342,12 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
        fb = new_plane_state->fb;
        if (!fb)
                return;
+
        num_planes = fb->format->num_planes;
        kmb_plane = to_kmb_plane(plane);
-       plane_id = kmb_plane->id;
 
        kmb = to_kmb(plane->dev);
+       plane_id = kmb_plane->id;
 
        spin_lock_irq(&kmb->irq_lock);
        if (kmb->kmb_under_flow || kmb->kmb_flush_done) {
@@ -400,20 +440,32 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
                config_csc(kmb, plane_id);
        }
 
+       kmb_plane_set_alpha(kmb, plane->state, plane_id, &val);
+
        kmb_write_lcd(kmb, LCD_LAYERn_CFG(plane_id), val);
 
+       /* Configure LCD_CONTROL */
+       ctrl = kmb_read_lcd(kmb, LCD_CONTROL);
+
+       /* Set layer blending config */
+       ctrl &= ~LCD_CTRL_ALPHA_ALL;
+       ctrl |= LCD_CTRL_ALPHA_BOTTOM_VL1 |
+               LCD_CTRL_ALPHA_BLEND_VL2;
+
+       ctrl &= ~LCD_CTRL_ALPHA_BLEND_BKGND_DISABLE;
+
        switch (plane_id) {
        case LAYER_0:
-               ctrl = LCD_CTRL_VL1_ENABLE;
+               ctrl |= LCD_CTRL_VL1_ENABLE;
                break;
        case LAYER_1:
-               ctrl = LCD_CTRL_VL2_ENABLE;
+               ctrl |= LCD_CTRL_VL2_ENABLE;
                break;
        case LAYER_2:
-               ctrl = LCD_CTRL_GL1_ENABLE;
+               ctrl |= LCD_CTRL_GL1_ENABLE;
                break;
        case LAYER_3:
-               ctrl = LCD_CTRL_GL2_ENABLE;
+               ctrl |= LCD_CTRL_GL2_ENABLE;
                break;
        }
 
@@ -425,7 +477,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
         */
        ctrl |= LCD_CTRL_VHSYNC_IDLE_LVL;
 
-       kmb_set_bitmask_lcd(kmb, LCD_CONTROL, ctrl);
+       kmb_write_lcd(kmb, LCD_CONTROL, ctrl);
 
        /* Enable pipeline AXI read transactions for the DMA
         * after setting graphics layers. This must be done
@@ -490,6 +542,9 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm)
        enum drm_plane_type plane_type;
        const u32 *plane_formats;
        int num_plane_formats;
+       unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+                                 BIT(DRM_MODE_BLEND_PREMULTI)   |
+                                 BIT(DRM_MODE_BLEND_COVERAGE);
 
        for (i = 0; i < KMB_MAX_PLANES; i++) {
                plane = drmm_kzalloc(drm, sizeof(*plane), GFP_KERNEL);
@@ -521,8 +576,16 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm)
                drm_dbg(drm, "%s : %d i=%d type=%d",
                        __func__, __LINE__,
                          i, plane_type);
+               drm_plane_create_alpha_property(&plane->base_plane);
+
+               drm_plane_create_blend_mode_property(&plane->base_plane,
+                                                    blend_caps);
+
+               drm_plane_create_zpos_immutable_property(&plane->base_plane, i);
+
                drm_plane_helper_add(&plane->base_plane,
                                     &kmb_plane_helper_funcs);
+
                if (plane_type == DRM_PLANE_TYPE_PRIMARY) {
                        primary = plane;
                        kmb->plane = plane;
index 486490f..6e8d22c 100644 (file)
@@ -35,6 +35,9 @@
 #define POSSIBLE_CRTCS 1
 #define to_kmb_plane(x) container_of(x, struct kmb_plane, base_plane)
 
+#define POSSIBLE_CRTCS         1
+#define KMB_MAX_PLANES         2
+
 enum layer_id {
        LAYER_0,
        LAYER_1,
@@ -43,8 +46,6 @@ enum layer_id {
        /* KMB_MAX_PLANES */
 };
 
-#define KMB_MAX_PLANES 1
-
 enum sub_plane_id {
        Y_PLANE,
        U_PLANE,
index 4815056..9756101 100644 (file)
 #define LCD_CTRL_OUTPUT_ENABLED                          BIT(19)
 #define LCD_CTRL_BPORCH_ENABLE                   BIT(21)
 #define LCD_CTRL_FPORCH_ENABLE                   BIT(22)
+#define LCD_CTRL_ALPHA_BLEND_BKGND_DISABLE       BIT(23)
 #define LCD_CTRL_PIPELINE_DMA                    BIT(28)
 #define LCD_CTRL_VHSYNC_IDLE_LVL                 BIT(31)
+#define LCD_CTRL_ALPHA_ALL                       (0xff << 6)
 
 /* interrupts */
 #define LCD_INT_STATUS                         (0x4 * 0x001)
 #define LCD_LAYER_ALPHA_EMBED                  BIT(5)
 #define LCD_LAYER_ALPHA_COMBI                  (LCD_LAYER_ALPHA_STATIC | \
                                                      LCD_LAYER_ALPHA_EMBED)
+#define LCD_LAYER_ALPHA_DISABLED               ~(LCD_LAYER_ALPHA_COMBI)
 /* RGB multiplied with alpha */
 #define LCD_LAYER_ALPHA_PREMULT                        BIT(6)
 #define LCD_LAYER_INVERT_COL                   BIT(7)
index e9c6af7..3ddf739 100644 (file)
@@ -17,7 +17,7 @@ config DRM_MSM
        select DRM_SCHED
        select SHMEM
        select TMPFS
-       select QCOM_SCM if ARCH_QCOM
+       select QCOM_SCM
        select WANT_DEV_COREDUMP
        select SND_SOC_HDMI_CODEC if SND_SOC
        select SYNC_FILE
@@ -55,7 +55,7 @@ config DRM_MSM_GPU_SUDO
 
 config DRM_MSM_HDMI_HDCP
        bool "Enable HDMI HDCP support in MSM DRM driver"
-       depends on DRM_MSM && QCOM_SCM
+       depends on DRM_MSM
        default y
        help
          Choose this option to enable HDCP state machine
index b8c31b6..66f32d9 100644 (file)
@@ -704,6 +704,7 @@ static const struct file_operations nv50_crc_flip_threshold_fops = {
        .open = nv50_crc_debugfs_flip_threshold_open,
        .read = seq_read,
        .write = nv50_crc_debugfs_flip_threshold_set,
+       .release = single_release,
 };
 
 int nv50_head_crc_late_register(struct nv50_head *head)
index d66f972..72099d1 100644 (file)
@@ -52,6 +52,7 @@ nv50_head_flush_clr(struct nv50_head *head,
 void
 nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
+       if (asyh->set.curs   ) head->func->curs_set(head, asyh);
        if (asyh->set.olut   ) {
                asyh->olut.offset = nv50_lut_load(&head->olut,
                                                  asyh->olut.buffer,
@@ -67,7 +68,6 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
        if (asyh->set.view   ) head->func->view    (head, asyh);
        if (asyh->set.mode   ) head->func->mode    (head, asyh);
        if (asyh->set.core   ) head->func->core_set(head, asyh);
-       if (asyh->set.curs   ) head->func->curs_set(head, asyh);
        if (asyh->set.base   ) head->func->base    (head, asyh);
        if (asyh->set.ovly   ) head->func->ovly    (head, asyh);
        if (asyh->set.dither ) head->func->dither  (head, asyh);
index c68cc95..a582c0c 100644 (file)
@@ -71,6 +71,7 @@
 #define PASCAL_CHANNEL_GPFIFO_A                       /* cla06f.h */ 0x0000c06f
 #define VOLTA_CHANNEL_GPFIFO_A                        /* clc36f.h */ 0x0000c36f
 #define TURING_CHANNEL_GPFIFO_A                       /* clc36f.h */ 0x0000c46f
+#define AMPERE_CHANNEL_GPFIFO_B                       /* clc36f.h */ 0x0000c76f
 
 #define NV50_DISP                                     /* cl5070.h */ 0x00005070
 #define G82_DISP                                      /* cl5070.h */ 0x00008270
 #define PASCAL_DMA_COPY_B                                            0x0000c1b5
 #define VOLTA_DMA_COPY_A                                             0x0000c3b5
 #define TURING_DMA_COPY_A                                            0x0000c5b5
+#define AMPERE_DMA_COPY_B                                            0x0000c7b5
 
 #define FERMI_DECOMPRESS                                             0x000090b8
 
index 54fab7c..64ee82c 100644 (file)
@@ -77,4 +77,5 @@ int gp100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
 int gp10b_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
 int gv100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
 int tu102_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
+int ga102_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **);
 #endif
index 6d07e65..c58bcdb 100644 (file)
@@ -844,6 +844,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
                            struct ttm_resource *, struct ttm_resource *);
                int (*init)(struct nouveau_channel *, u32 handle);
        } _methods[] = {
+               {  "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
index 80099ef..ea77691 100644 (file)
@@ -250,7 +250,8 @@ static int
 nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
                    u64 runlist, bool priv, struct nouveau_channel **pchan)
 {
-       static const u16 oclasses[] = { TURING_CHANNEL_GPFIFO_A,
+       static const u16 oclasses[] = { AMPERE_CHANNEL_GPFIFO_B,
+                                       TURING_CHANNEL_GPFIFO_A,
                                        VOLTA_CHANNEL_GPFIFO_A,
                                        PASCAL_CHANNEL_GPFIFO_A,
                                        MAXWELL_CHANNEL_GPFIFO_A,
@@ -386,7 +387,8 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 
        nvif_object_map(&chan->user, NULL, 0);
 
-       if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
+       if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO &&
+           chan->user.oclass < AMPERE_CHANNEL_GPFIFO_B) {
                ret = nvif_notify_ctor(&chan->user, "abi16ChanKilled",
                                       nouveau_channel_killed,
                                       true, NV906F_V0_NTFY_KILLED,
index c2bc05e..1cbe010 100644 (file)
@@ -207,6 +207,7 @@ static const struct file_operations nouveau_pstate_fops = {
        .open = nouveau_debugfs_pstate_open,
        .read = seq_read,
        .write = nouveau_debugfs_pstate_set,
+       .release = single_release,
 };
 
 static struct drm_info_list nouveau_debugfs_list[] = {
index 1f828c9..6109cd9 100644 (file)
@@ -345,6 +345,9 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
        u32 arg0, arg1;
        int ret;
 
+       if (device->info.family >= NV_DEVICE_INFO_V0_AMPERE)
+               return;
+
        /* Allocate channel that has access to the graphics engine. */
        if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
                arg0 = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR);
@@ -469,6 +472,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
                case PASCAL_CHANNEL_GPFIFO_A:
                case VOLTA_CHANNEL_GPFIFO_A:
                case TURING_CHANNEL_GPFIFO_A:
+               case AMPERE_CHANNEL_GPFIFO_B:
                        ret = nvc0_fence_create(drm);
                        break;
                default:
index 5b27845..8c2ecc2 100644 (file)
@@ -247,10 +247,8 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
        }
 
        ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
-       if (ret) {
-               nouveau_bo_ref(NULL, &nvbo);
+       if (ret)
                return ret;
-       }
 
        /* we restrict allowed domains on nv50+ to only the types
         * that were requested at creation time.  not possibly on
index 7c9c928..c3526a8 100644 (file)
@@ -204,7 +204,7 @@ nv84_fence_create(struct nouveau_drm *drm)
        priv->base.context_new = nv84_fence_context_new;
        priv->base.context_del = nv84_fence_context_del;
 
-       priv->base.uevent = true;
+       priv->base.uevent = drm->client.device.info.family < NV_DEVICE_INFO_V0_AMPERE;
 
        mutex_init(&priv->mutex);
 
index 93ddf63..ca75c5f 100644 (file)
@@ -2602,6 +2602,7 @@ nv172_chipset = {
        .top      = { 0x00000001, ga100_top_new },
        .disp     = { 0x00000001, ga102_disp_new },
        .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
 };
 
 static const struct nvkm_device_chip
@@ -2622,6 +2623,7 @@ nv174_chipset = {
        .top      = { 0x00000001, ga100_top_new },
        .disp     = { 0x00000001, ga102_disp_new },
        .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
 };
 
 static const struct nvkm_device_chip
@@ -2642,6 +2644,7 @@ nv177_chipset = {
        .top      = { 0x00000001, ga100_top_new },
        .disp     = { 0x00000001, ga102_disp_new },
        .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
 };
 
 static int
index 3209eb7..5e831d3 100644 (file)
@@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/fifo/gp100.o
 nvkm-y += nvkm/engine/fifo/gp10b.o
 nvkm-y += nvkm/engine/fifo/gv100.o
 nvkm-y += nvkm/engine/fifo/tu102.o
+nvkm-y += nvkm/engine/fifo/ga102.o
 
 nvkm-y += nvkm/engine/fifo/chan.o
 nvkm-y += nvkm/engine/fifo/channv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
new file mode 100644 (file)
index 0000000..c630dbd
--- /dev/null
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define ga102_fifo(p) container_of((p), struct ga102_fifo, base.engine)
+#define ga102_chan(p) container_of((p), struct ga102_chan, object)
+#include <engine/fifo.h>
+#include "user.h"
+
+#include <core/memory.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+#include <subdev/top.h>
+
+#include <nvif/cl0080.h>
+#include <nvif/clc36f.h>
+#include <nvif/class.h>
+
+struct ga102_fifo {
+       struct nvkm_fifo base;
+};
+
+struct ga102_chan {
+       struct nvkm_object object;
+
+       struct {
+               u32 runl;
+               u32 chan;
+       } ctrl;
+
+       struct nvkm_memory *mthd;
+       struct nvkm_memory *inst;
+       struct nvkm_memory *user;
+       struct nvkm_memory *runl;
+
+       struct nvkm_vmm *vmm;
+};
+
+static int
+ga102_chan_sclass(struct nvkm_object *object, int index, struct nvkm_oclass *oclass)
+{
+       if (index == 0) {
+               oclass->ctor = nvkm_object_new;
+               oclass->base = (struct nvkm_sclass) { -1, -1, AMPERE_DMA_COPY_B };
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+static int
+ga102_chan_map(struct nvkm_object *object, void *argv, u32 argc,
+              enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+       struct ga102_chan *chan = ga102_chan(object);
+       struct nvkm_device *device = chan->object.engine->subdev.device;
+       u64 bar2 = nvkm_memory_bar2(chan->user);
+
+       if (bar2 == ~0ULL)
+               return -EFAULT;
+
+       *type = NVKM_OBJECT_MAP_IO;
+       *addr = device->func->resource_addr(device, 3) + bar2;
+       *size = 0x1000;
+       return 0;
+}
+
+static int
+ga102_chan_fini(struct nvkm_object *object, bool suspend)
+{
+       struct ga102_chan *chan = ga102_chan(object);
+       struct nvkm_device *device = chan->object.engine->subdev.device;
+
+       nvkm_wr32(device, chan->ctrl.chan, 0x00000003);
+
+       nvkm_wr32(device, chan->ctrl.runl + 0x098, 0x01000000);
+       nvkm_msec(device, 2000,
+               if (!(nvkm_rd32(device, chan->ctrl.runl + 0x098) & 0x00100000))
+                       break;
+       );
+
+       nvkm_wr32(device, chan->ctrl.runl + 0x088, 0);
+
+       nvkm_wr32(device, chan->ctrl.chan, 0xffffffff);
+       return 0;
+}
+
+static int
+ga102_chan_init(struct nvkm_object *object)
+{
+       struct ga102_chan *chan = ga102_chan(object);
+       struct nvkm_device *device = chan->object.engine->subdev.device;
+
+       nvkm_mask(device, chan->ctrl.runl + 0x300, 0x80000000, 0x80000000);
+
+       nvkm_wr32(device, chan->ctrl.runl + 0x080, lower_32_bits(nvkm_memory_addr(chan->runl)));
+       nvkm_wr32(device, chan->ctrl.runl + 0x084, upper_32_bits(nvkm_memory_addr(chan->runl)));
+       nvkm_wr32(device, chan->ctrl.runl + 0x088, 2);
+
+       nvkm_wr32(device, chan->ctrl.chan, 0x00000002);
+       nvkm_wr32(device, chan->ctrl.runl + 0x0090, 0);
+       return 0;
+}
+
+static void *
+ga102_chan_dtor(struct nvkm_object *object)
+{
+       struct ga102_chan *chan = ga102_chan(object);
+
+       if (chan->vmm) {
+               nvkm_vmm_part(chan->vmm, chan->inst);
+               nvkm_vmm_unref(&chan->vmm);
+       }
+
+       nvkm_memory_unref(&chan->runl);
+       nvkm_memory_unref(&chan->user);
+       nvkm_memory_unref(&chan->inst);
+       nvkm_memory_unref(&chan->mthd);
+       return chan;
+}
+
+static const struct nvkm_object_func
+ga102_chan = {
+       .dtor = ga102_chan_dtor,
+       .init = ga102_chan_init,
+       .fini = ga102_chan_fini,
+       .map = ga102_chan_map,
+       .sclass = ga102_chan_sclass,
+};
+
+static int
+ga102_chan_new(struct nvkm_device *device,
+              const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
+{
+       struct volta_channel_gpfifo_a_v0 *args = argv;
+       struct nvkm_top_device *tdev;
+       struct nvkm_vmm *vmm;
+       struct ga102_chan *chan;
+       int ret;
+
+       if (argc != sizeof(*args))
+               return -ENOSYS;
+
+       vmm = nvkm_uvmm_search(oclass->client, args->vmm);
+       if (IS_ERR(vmm))
+               return PTR_ERR(vmm);
+
+       if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&ga102_chan, oclass, &chan->object);
+       *pobject = &chan->object;
+
+       list_for_each_entry(tdev, &device->top->device, head) {
+               if (tdev->type == NVKM_ENGINE_CE) {
+                       chan->ctrl.runl = tdev->runlist;
+                       break;
+               }
+       }
+
+       if (!chan->ctrl.runl)
+               return -ENODEV;
+
+       chan->ctrl.chan = nvkm_rd32(device, chan->ctrl.runl + 0x004) & 0xfffffff0;
+
+       args->chid = 0;
+       args->inst = 0;
+       args->token = nvkm_rd32(device, chan->ctrl.runl + 0x008) & 0xffff0000;
+
+       ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->mthd);
+       if (ret)
+               return ret;
+
+       ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->inst);
+       if (ret)
+               return ret;
+
+       nvkm_kmap(chan->inst);
+       nvkm_wo32(chan->inst, 0x010, 0x0000face);
+       nvkm_wo32(chan->inst, 0x030, 0x7ffff902);
+       nvkm_wo32(chan->inst, 0x048, lower_32_bits(args->ioffset));
+       nvkm_wo32(chan->inst, 0x04c, upper_32_bits(args->ioffset) |
+                                    (order_base_2(args->ilength / 8) << 16));
+       nvkm_wo32(chan->inst, 0x084, 0x20400000);
+       nvkm_wo32(chan->inst, 0x094, 0x30000001);
+       nvkm_wo32(chan->inst, 0x0ac, 0x00020000);
+       nvkm_wo32(chan->inst, 0x0e4, 0x00000000);
+       nvkm_wo32(chan->inst, 0x0e8, 0);
+       nvkm_wo32(chan->inst, 0x0f4, 0x00001000);
+       nvkm_wo32(chan->inst, 0x0f8, 0x10003080);
+       nvkm_mo32(chan->inst, 0x218, 0x00000000, 0x00000000);
+       nvkm_wo32(chan->inst, 0x220, lower_32_bits(nvkm_memory_bar2(chan->mthd)));
+       nvkm_wo32(chan->inst, 0x224, upper_32_bits(nvkm_memory_bar2(chan->mthd)));
+       nvkm_done(chan->inst);
+
+       ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->user);
+       if (ret)
+               return ret;
+
+       ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->runl);
+       if (ret)
+               return ret;
+
+       nvkm_kmap(chan->runl);
+       nvkm_wo32(chan->runl, 0x00, 0x80030001);
+       nvkm_wo32(chan->runl, 0x04, 1);
+       nvkm_wo32(chan->runl, 0x08, 0);
+       nvkm_wo32(chan->runl, 0x0c, 0x00000000);
+       nvkm_wo32(chan->runl, 0x10, lower_32_bits(nvkm_memory_addr(chan->user)));
+       nvkm_wo32(chan->runl, 0x14, upper_32_bits(nvkm_memory_addr(chan->user)));
+       nvkm_wo32(chan->runl, 0x18, lower_32_bits(nvkm_memory_addr(chan->inst)));
+       nvkm_wo32(chan->runl, 0x1c, upper_32_bits(nvkm_memory_addr(chan->inst)));
+       nvkm_done(chan->runl);
+
+       ret = nvkm_vmm_join(vmm, chan->inst);
+       if (ret)
+               return ret;
+
+       chan->vmm = nvkm_vmm_ref(vmm);
+       return 0;
+}
+
+static const struct nvkm_device_oclass
+ga102_chan_oclass = {
+       .ctor = ga102_chan_new,
+};
+
+static int
+ga102_user_new(struct nvkm_device *device,
+              const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
+{
+       return tu102_fifo_user_new(oclass, argv, argc, pobject);
+}
+
+static const struct nvkm_device_oclass
+ga102_user_oclass = {
+       .ctor = ga102_user_new,
+};
+
+static int
+ga102_fifo_sclass(struct nvkm_oclass *oclass, int index, const struct nvkm_device_oclass **class)
+{
+       if (index == 0) {
+               oclass->base = (struct nvkm_sclass) { -1, -1, VOLTA_USERMODE_A };
+               *class = &ga102_user_oclass;
+               return 0;
+       } else
+       if (index == 1) {
+               oclass->base = (struct nvkm_sclass) { 0, 0, AMPERE_CHANNEL_GPFIFO_B };
+               *class = &ga102_chan_oclass;
+               return 0;
+       }
+
+       return 2;
+}
+
+static int
+ga102_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
+{
+       switch (mthd) {
+       case NV_DEVICE_HOST_CHANNELS: *data = 1; return 0;
+       default:
+               break;
+       }
+
+       return -ENOSYS;
+}
+
+static void *
+ga102_fifo_dtor(struct nvkm_engine *engine)
+{
+       return ga102_fifo(engine);
+}
+
+static const struct nvkm_engine_func
+ga102_fifo = {
+       .dtor = ga102_fifo_dtor,
+       .info = ga102_fifo_info,
+       .base.sclass = ga102_fifo_sclass,
+};
+
+int
+ga102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+              struct nvkm_fifo **pfifo)
+{
+       struct ga102_fifo *fifo;
+
+       if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_engine_ctor(&ga102_fifo, device, type, inst, true, &fifo->base.engine);
+       *pfifo = &fifo->base;
+       return 0;
+}
index 31933f3..c982d83 100644 (file)
@@ -54,7 +54,7 @@ ga100_top_oneinit(struct nvkm_top *top)
                        info->reset   = (data & 0x0000001f);
                        break;
                case 2:
-                       info->runlist = (data & 0x0000fc00) >> 10;
+                       info->runlist = (data & 0x00fffc00);
                        info->engine  = (data & 0x00000003);
                        break;
                default:
@@ -85,9 +85,10 @@ ga100_top_oneinit(struct nvkm_top *top)
                }
 
                nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d "
-                                  "runlist %2d engine %2d reset %2d\n", type, inst,
+                                  "runlist %6x engine %2d reset %2d\n", type, inst,
                           info->type == NVKM_SUBDEV_NR ? "????????" : nvkm_subdev_type[info->type],
-                          info->addr, info->fault, info->runlist, info->engine, info->reset);
+                          info->addr, info->fault, info->runlist < 0 ? 0 : info->runlist,
+                          info->engine, info->reset);
                info = NULL;
        }
 
index 2d8794d..3d8a9ab 100644 (file)
@@ -146,8 +146,8 @@ static const struct reg_sequence y030xx067a_init_sequence[] = {
        { 0x09, REG09_SUB_BRIGHT_R(0x20) },
        { 0x0a, REG0A_SUB_BRIGHT_B(0x20) },
        { 0x0b, REG0B_HD_FREERUN | REG0B_VD_FREERUN },
-       { 0x0c, REG0C_CONTRAST_R(0x10) },
-       { 0x0d, REG0D_CONTRAST_G(0x10) },
+       { 0x0c, REG0C_CONTRAST_R(0x00) },
+       { 0x0d, REG0D_CONTRAST_G(0x00) },
        { 0x0e, REG0E_CONTRAST_B(0x10) },
        { 0x0f, 0 },
        { 0x10, REG10_BRIGHT(0x7f) },
index ba9e14d..a25b98b 100644 (file)
@@ -1174,26 +1174,24 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
         *
         * Action plan:
         *
-        * 1. When DRM gives us a mode, we should add 999 Hz to it.  That way
-        *    if the clock we need is 60000001 Hz (~60 MHz) and DRM tells us to
-        *    make 60000 kHz then the clock framework will actually give us
-        *    the right clock.
+        * 1. Try to set the exact rate first, and confirm the clock framework
+        *    can provide it.
         *
-        *    NOTE: if the PLL (maybe through a divider) could actually make
-        *    a clock rate 999 Hz higher instead of the one we want then this
-        *    could be a problem.  Unfortunately there's not much we can do
-        *    since it's baked into DRM to use kHz.  It shouldn't matter in
-        *    practice since Rockchip PLLs are controlled by tables and
-        *    even if there is a divider in the middle I wouldn't expect PLL
-        *    rates in the table that are just a few kHz different.
+        * 2. If the clock framework cannot provide the exact rate, we should
+        *    add 999 Hz to the requested rate.  That way if the clock we need
+        *    is 60000001 Hz (~60 MHz) and DRM tells us to make 60000 kHz then
+        *    the clock framework will actually give us the right clock.
         *
-        * 2. Get the clock framework to round the rate for us to tell us
+        * 3. Get the clock framework to round the rate for us to tell us
         *    what it will actually make.
         *
-        * 3. Store the rounded up rate so that we don't need to worry about
+        * 4. Store the rounded up rate so that we don't need to worry about
         *    this in the actual clk_set_rate().
         */
-       rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000 + 999);
+       rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000);
+       if (rate / 1000 != adjusted_mode->clock)
+               rate = clk_round_rate(vop->dclk,
+                                     adjusted_mode->clock * 1000 + 999);
        adjusted_mode->clock = DIV_ROUND_UP(rate, 1000);
 
        return true;
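The dclk selection above follows the action plan spelled out in the comment: ask the clock framework for the exact pixel clock first, and only fall back to the +999 Hz trick when the exact rate cannot be provided. A minimal standalone sketch of that two-step rounding, assuming a helper name that is illustrative and not part of the driver:

static long pick_dclk_rate(struct clk *dclk, int mode_clock_khz)
{
	/* Step 1: try the exact pixel clock and see what the framework offers. */
	long rate = clk_round_rate(dclk, mode_clock_khz * 1000);

	/*
	 * Step 2: if that misses the kHz value DRM asked for, add 999 Hz so a
	 * rate such as 60000001 Hz still rounds to the requested 60000 kHz.
	 */
	if (rate / 1000 != mode_clock_khz)
		rate = clk_round_rate(dclk, mode_clock_khz * 1000 + 999);

	/* The caller stores DIV_ROUND_UP(rate, 1000) back into the mode clock. */
	return rate;
}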
index f75fb15..016b877 100644 (file)
@@ -216,11 +216,13 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
                goto err_disable_clk_tmds;
        }
 
+       ret = sun8i_hdmi_phy_init(hdmi->phy);
+       if (ret)
+               goto err_disable_clk_tmds;
+
        drm_encoder_helper_add(encoder, &sun8i_dw_hdmi_encoder_helper_funcs);
        drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
 
-       sun8i_hdmi_phy_init(hdmi->phy);
-
        plat_data->mode_valid = hdmi->quirks->mode_valid;
        plat_data->use_drm_infoframe = hdmi->quirks->use_drm_infoframe;
        sun8i_hdmi_phy_set_ops(hdmi->phy, plat_data);
@@ -262,6 +264,7 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
        struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev);
 
        dw_hdmi_unbind(hdmi->hdmi);
+       sun8i_hdmi_phy_deinit(hdmi->phy);
        clk_disable_unprepare(hdmi->clk_tmds);
        reset_control_assert(hdmi->rst_ctrl);
        gpiod_set_value(hdmi->ddc_en, 0);
index 74f6ed0..bffe1b9 100644 (file)
@@ -169,6 +169,7 @@ struct sun8i_hdmi_phy {
        struct clk                      *clk_phy;
        struct clk                      *clk_pll0;
        struct clk                      *clk_pll1;
+       struct device                   *dev;
        unsigned int                    rcal;
        struct regmap                   *regs;
        struct reset_control            *rst_phy;
@@ -205,7 +206,8 @@ encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder)
 
 int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
 
-void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
+int sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
+void sun8i_hdmi_phy_deinit(struct sun8i_hdmi_phy *phy);
 void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
                            struct dw_hdmi_plat_data *plat_data);
 
index c923970..b64d93d 100644 (file)
@@ -506,9 +506,60 @@ static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy)
        phy->rcal = (val & SUN8I_HDMI_PHY_ANA_STS_RCAL_MASK) >> 2;
 }
 
-void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy)
+int sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy)
 {
+       int ret;
+
+       ret = reset_control_deassert(phy->rst_phy);
+       if (ret) {
+               dev_err(phy->dev, "Cannot deassert phy reset control: %d\n", ret);
+               return ret;
+       }
+
+       ret = clk_prepare_enable(phy->clk_bus);
+       if (ret) {
+               dev_err(phy->dev, "Cannot enable bus clock: %d\n", ret);
+               goto err_assert_rst_phy;
+       }
+
+       ret = clk_prepare_enable(phy->clk_mod);
+       if (ret) {
+               dev_err(phy->dev, "Cannot enable mod clock: %d\n", ret);
+               goto err_disable_clk_bus;
+       }
+
+       if (phy->variant->has_phy_clk) {
+               ret = sun8i_phy_clk_create(phy, phy->dev,
+                                          phy->variant->has_second_pll);
+               if (ret) {
+                       dev_err(phy->dev, "Couldn't create the PHY clock\n");
+                       goto err_disable_clk_mod;
+               }
+
+               clk_prepare_enable(phy->clk_phy);
+       }
+
        phy->variant->phy_init(phy);
+
+       return 0;
+
+err_disable_clk_mod:
+       clk_disable_unprepare(phy->clk_mod);
+err_disable_clk_bus:
+       clk_disable_unprepare(phy->clk_bus);
+err_assert_rst_phy:
+       reset_control_assert(phy->rst_phy);
+
+       return ret;
+}
+
+void sun8i_hdmi_phy_deinit(struct sun8i_hdmi_phy *phy)
+{
+       clk_disable_unprepare(phy->clk_mod);
+       clk_disable_unprepare(phy->clk_bus);
+       clk_disable_unprepare(phy->clk_phy);
+
+       reset_control_assert(phy->rst_phy);
 }
 
 void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
@@ -638,6 +689,7 @@ static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        phy->variant = (struct sun8i_hdmi_phy_variant *)match->data;
+       phy->dev = dev;
 
        ret = of_address_to_resource(node, 0, &res);
        if (ret) {
@@ -696,47 +748,10 @@ static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
                goto err_put_clk_pll1;
        }
 
-       ret = reset_control_deassert(phy->rst_phy);
-       if (ret) {
-               dev_err(dev, "Cannot deassert phy reset control: %d\n", ret);
-               goto err_put_rst_phy;
-       }
-
-       ret = clk_prepare_enable(phy->clk_bus);
-       if (ret) {
-               dev_err(dev, "Cannot enable bus clock: %d\n", ret);
-               goto err_deassert_rst_phy;
-       }
-
-       ret = clk_prepare_enable(phy->clk_mod);
-       if (ret) {
-               dev_err(dev, "Cannot enable mod clock: %d\n", ret);
-               goto err_disable_clk_bus;
-       }
-
-       if (phy->variant->has_phy_clk) {
-               ret = sun8i_phy_clk_create(phy, dev,
-                                          phy->variant->has_second_pll);
-               if (ret) {
-                       dev_err(dev, "Couldn't create the PHY clock\n");
-                       goto err_disable_clk_mod;
-               }
-
-               clk_prepare_enable(phy->clk_phy);
-       }
-
        platform_set_drvdata(pdev, phy);
 
        return 0;
 
-err_disable_clk_mod:
-       clk_disable_unprepare(phy->clk_mod);
-err_disable_clk_bus:
-       clk_disable_unprepare(phy->clk_bus);
-err_deassert_rst_phy:
-       reset_control_assert(phy->rst_phy);
-err_put_rst_phy:
-       reset_control_put(phy->rst_phy);
 err_put_clk_pll1:
        clk_put(phy->clk_pll1);
 err_put_clk_pll0:
@@ -753,12 +768,6 @@ static int sun8i_hdmi_phy_remove(struct platform_device *pdev)
 {
        struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev);
 
-       clk_disable_unprepare(phy->clk_mod);
-       clk_disable_unprepare(phy->clk_bus);
-       clk_disable_unprepare(phy->clk_phy);
-
-       reset_control_assert(phy->rst_phy);
-
        reset_control_put(phy->rst_phy);
 
        clk_put(phy->clk_pll0);
index b4b4653..ed8a4b7 100644 (file)
@@ -1395,14 +1395,6 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
        return 0;
 }
 
-static const struct snd_soc_dapm_widget vc4_hdmi_audio_widgets[] = {
-       SND_SOC_DAPM_OUTPUT("TX"),
-};
-
-static const struct snd_soc_dapm_route vc4_hdmi_audio_routes[] = {
-       { "TX", NULL, "Playback" },
-};
-
 static const struct snd_soc_component_driver vc4_hdmi_audio_cpu_dai_comp = {
        .name = "vc4-hdmi-cpu-dai-component",
 };
index 4e0b7c2..015e11c 100644 (file)
@@ -49,7 +49,7 @@
 #define MLXCPLD_LPCI2C_NACK_IND                2
 
 #define MLXCPLD_I2C_FREQ_1000KHZ_SET   0x04
-#define MLXCPLD_I2C_FREQ_400KHZ_SET    0x0f
+#define MLXCPLD_I2C_FREQ_400KHZ_SET    0x0c
 #define MLXCPLD_I2C_FREQ_100KHZ_SET    0x42
 
 enum mlxcpld_i2c_frequency {
@@ -495,7 +495,7 @@ mlxcpld_i2c_set_frequency(struct mlxcpld_i2c_priv *priv,
                return err;
 
        /* Set frequency only if it is not 100KHz, which is default. */
-       switch ((data->reg & data->mask) >> data->bit) {
+       switch ((regval & data->mask) >> data->bit) {
        case MLXCPLD_I2C_FREQ_1000KHZ:
                freq = MLXCPLD_I2C_FREQ_1000KHZ_SET;
                break;
index 477480d..7d4b3eb 100644 (file)
@@ -41,6 +41,8 @@
 #define I2C_HANDSHAKE_RST              0x0020
 #define I2C_FIFO_ADDR_CLR              0x0001
 #define I2C_DELAY_LEN                  0x0002
+#define I2C_ST_START_CON               0x8001
+#define I2C_FS_START_CON               0x1800
 #define I2C_TIME_CLR_VALUE             0x0000
 #define I2C_TIME_DEFAULT_VALUE         0x0003
 #define I2C_WRRD_TRANAC_VALUE          0x0002
@@ -480,6 +482,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
 {
        u16 control_reg;
        u16 intr_stat_reg;
+       u16 ext_conf_val;
 
        mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_START);
        intr_stat_reg = mtk_i2c_readw(i2c, OFFSET_INTR_STAT);
@@ -518,8 +521,13 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
        if (i2c->dev_comp->ltiming_adjust)
                mtk_i2c_writew(i2c, i2c->ltiming_reg, OFFSET_LTIMING);
 
+       if (i2c->speed_hz <= I2C_MAX_STANDARD_MODE_FREQ)
+               ext_conf_val = I2C_ST_START_CON;
+       else
+               ext_conf_val = I2C_FS_START_CON;
+
        if (i2c->dev_comp->timing_adjust) {
-               mtk_i2c_writew(i2c, i2c->ac_timing.ext, OFFSET_EXT_CONF);
+               ext_conf_val = i2c->ac_timing.ext;
                mtk_i2c_writew(i2c, i2c->ac_timing.inter_clk_div,
                               OFFSET_CLOCK_DIV);
                mtk_i2c_writew(i2c, I2C_SCL_MIS_COMP_VALUE,
@@ -544,6 +552,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
                                       OFFSET_HS_STA_STO_AC_TIMING);
                }
        }
+       mtk_i2c_writew(i2c, ext_conf_val, OFFSET_EXT_CONF);
 
        /* If use i2c pin from PMIC mt6397 side, need set PATH_DIR first */
        if (i2c->have_pmic)
index aaeeacc..546cc93 100644 (file)
@@ -454,6 +454,7 @@ static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
                        break;
 
                i2c_acpi_register_device(adapter, adev, &info);
+               put_device(&adapter->dev);
                break;
        case ACPI_RECONFIG_DEVICE_REMOVE:
                if (!acpi_device_enumerated(adev))
index f1099b4..467519a 100644 (file)
@@ -5,3 +5,4 @@
 
 # Keep in alphabetical order
 obj-$(CONFIG_IIO_TEST_FORMAT) += iio-test-format.o
+CFLAGS_iio-test-format.o += $(DISABLE_STRUCTLEAK_PLUGIN)
index 124c41a..c5c71b7 100644 (file)
@@ -308,7 +308,6 @@ config APPLE_DART
 config ARM_SMMU
        tristate "ARM Ltd. System MMU (SMMU) Support"
        depends on ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)
-       depends on QCOM_SCM || !QCOM_SCM #if QCOM_SCM=m this can't be =y
        select IOMMU_API
        select IOMMU_IO_PGTABLE_LPAE
        select ARM_DMA_USE_IOMMU if ARM
@@ -438,7 +437,7 @@ config QCOM_IOMMU
        # Note: iommu drivers cannot (yet?) be built as modules
        bool "Qualcomm IOMMU Support"
        depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64)
-       depends on QCOM_SCM=y
+       select QCOM_SCM
        select IOMMU_API
        select IOMMU_IO_PGTABLE_LPAE
        select ARM_DMA_USE_IOMMU
index e240a7b..b0cc01a 100644 (file)
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
 obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
-arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o arm-smmu-qcom.o
+arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o
+arm_smmu-$(CONFIG_ARM_SMMU_QCOM) += arm-smmu-qcom.o
index 9f465e1..2c25cce 100644 (file)
@@ -215,7 +215,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
            of_device_is_compatible(np, "nvidia,tegra186-smmu"))
                return nvidia_smmu_impl_init(smmu);
 
-       smmu = qcom_smmu_impl_init(smmu);
+       if (IS_ENABLED(CONFIG_ARM_SMMU_QCOM))
+               smmu = qcom_smmu_impl_init(smmu);
 
        if (of_device_is_compatible(np, "marvell,ap806-smmu-500"))
                smmu->impl = &mrvl_mmu500_impl;
index 157c924..80321e0 100644 (file)
@@ -565,7 +565,7 @@ config VIDEO_QCOM_VENUS
        depends on VIDEO_DEV && VIDEO_V4L2 && QCOM_SMEM
        depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
        select QCOM_MDT_LOADER if ARCH_QCOM
-       select QCOM_SCM if ARCH_QCOM
+       select QCOM_SCM
        select VIDEOBUF2_DMA_CONTIG
        select V4L2_MEM2MEM_DEV
        help
index 7131396..95b3511 100644 (file)
@@ -547,7 +547,7 @@ config MMC_SDHCI_MSM
        depends on MMC_SDHCI_PLTFM
        select MMC_SDHCI_IO_ACCESSORS
        select MMC_CQHCI
-       select QCOM_SCM if MMC_CRYPTO && ARCH_QCOM
+       select QCOM_SCM if MMC_CRYPTO
        help
          This selects the Secure Digital Host Controller Interface (SDHCI)
          support present in Qualcomm SOCs. The controller supports
index 3f28eb4..8f36536 100644 (file)
@@ -746,7 +746,7 @@ static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
        writel(start, host->regs + SD_EMMC_START);
 }
 
-/* local sg copy to buffer version with _to/fromio usage for dram_access_quirk */
+/* local sg copy for dram_access_quirk */
 static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data,
                                  size_t buflen, bool to_buffer)
 {
@@ -764,21 +764,27 @@ static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data
        sg_miter_start(&miter, sgl, nents, sg_flags);
 
        while ((offset < buflen) && sg_miter_next(&miter)) {
-               unsigned int len;
+               unsigned int buf_offset = 0;
+               unsigned int len, left;
+               u32 *buf = miter.addr;
 
                len = min(miter.length, buflen - offset);
+               left = len;
 
-               /* When dram_access_quirk, the bounce buffer is a iomem mapping */
-               if (host->dram_access_quirk) {
-                       if (to_buffer)
-                               memcpy_toio(host->bounce_iomem_buf + offset, miter.addr, len);
-                       else
-                               memcpy_fromio(miter.addr, host->bounce_iomem_buf + offset, len);
+               if (to_buffer) {
+                       do {
+                               writel(*buf++, host->bounce_iomem_buf + offset + buf_offset);
+
+                               buf_offset += 4;
+                               left -= 4;
+                       } while (left);
                } else {
-                       if (to_buffer)
-                               memcpy(host->bounce_buf + offset, miter.addr, len);
-                       else
-                               memcpy(miter.addr, host->bounce_buf + offset, len);
+                       do {
+                               *buf++ = readl(host->bounce_iomem_buf + offset + buf_offset);
+
+                               buf_offset += 4;
+                               left -= 4;
+                       } while (left);
                }
 
                offset += len;
@@ -830,7 +836,11 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
                if (data->flags & MMC_DATA_WRITE) {
                        cmd_cfg |= CMD_CFG_DATA_WR;
                        WARN_ON(xfer_bytes > host->bounce_buf_size);
-                       meson_mmc_copy_buffer(host, data, xfer_bytes, true);
+                       if (host->dram_access_quirk)
+                               meson_mmc_copy_buffer(host, data, xfer_bytes, true);
+                       else
+                               sg_copy_to_buffer(data->sg, data->sg_len,
+                                                 host->bounce_buf, xfer_bytes);
                        dma_wmb();
                }
 
@@ -849,12 +859,43 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
        writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG);
 }
 
+static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data *data)
+{
+       struct scatterlist *sg;
+       int i;
+
+       /* Reject request if any element offset or size is not 32bit aligned */
+       for_each_sg(data->sg, sg, data->sg_len, i) {
+               if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
+                   !IS_ALIGNED(sg->length, sizeof(u32))) {
+                       dev_err(mmc_dev(mmc), "unaligned sg offset %u len %u\n",
+                               data->sg->offset, data->sg->length);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
 static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
        struct meson_host *host = mmc_priv(mmc);
        bool needs_pre_post_req = mrq->data &&
                        !(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);
 
+       /*
+        * The memory at the end of the controller used as bounce buffer for
+        * the dram_access_quirk only accepts 32bit read/write access,
+        * check the aligment and length of the data before starting the request.
+        * check the alignment and length of the data before starting the request.
+        */
+       if (host->dram_access_quirk && mrq->data) {
+               mrq->cmd->error = meson_mmc_validate_dram_access(mmc, mrq->data);
+               if (mrq->cmd->error) {
+                       mmc_request_done(mmc, mrq);
+                       return;
+               }
+       }
+
        if (needs_pre_post_req) {
                meson_mmc_get_transfer_mode(mmc, mrq);
                if (!meson_mmc_desc_chain_mode(mrq->data))
@@ -999,7 +1040,11 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
        if (meson_mmc_bounce_buf_read(data)) {
                xfer_bytes = data->blksz * data->blocks;
                WARN_ON(xfer_bytes > host->bounce_buf_size);
-               meson_mmc_copy_buffer(host, data, xfer_bytes, false);
+               if (host->dram_access_quirk)
+                       meson_mmc_copy_buffer(host, data, xfer_bytes, false);
+               else
+                       sg_copy_from_buffer(data->sg, data->sg_len,
+                                           host->bounce_buf, xfer_bytes);
        }
 
        next_cmd = meson_mmc_get_next_command(cmd);
index 5564d7b..d1a1c54 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/slot-gpio.h>
@@ -61,7 +62,6 @@ static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
 static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
 {
        u16 clk;
-       unsigned long timeout;
 
        host->mmc->actual_clock = 0;
 
@@ -86,16 +86,11 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
        sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
 
        /* Wait max 20 ms */
-       timeout = 20;
-       while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
-               & SDHCI_CLOCK_INT_STABLE)) {
-               if (timeout == 0) {
-                       pr_err("%s: Internal clock never stabilised.\n",
-                              mmc_hostname(host->mmc));
-                       return;
-               }
-               timeout--;
-               mdelay(1);
+       if (read_poll_timeout(sdhci_readw, clk, (clk & SDHCI_CLOCK_INT_STABLE),
+                             1000, 20000, false, host, SDHCI_CLOCK_CONTROL)) {
+               pr_err("%s: Internal clock never stabilised.\n",
+                      mmc_hostname(host->mmc));
+               return;
        }
 
        clk |= SDHCI_CLOCK_CARD_EN;
@@ -114,6 +109,7 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
+       unsigned int tmp;
 
        sdhci_reset(host, mask);
 
@@ -126,6 +122,10 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
 
                sdhci_writel(host, calcr | SDMMC_CALCR_ALWYSON | SDMMC_CALCR_EN,
                             SDMMC_CALCR);
+
+               if (read_poll_timeout(sdhci_readl, tmp, !(tmp & SDMMC_CALCR_EN),
+                                     10, 20000, false, host, SDMMC_CALCR))
+                       dev_err(mmc_dev(host->mmc), "Failed to calibrate\n");
        }
 }
 
index 8f99cfa..d037682 100644 (file)
@@ -4,6 +4,7 @@ config QCOM_IPA
        depends on ARCH_QCOM || COMPILE_TEST
        depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
        select QCOM_MDT_LOADER if ARCH_QCOM
+       select QCOM_SCM
        select QCOM_QMI_HELPERS
        help
          Choose Y or M here to include support for the Qualcomm
index f87f175..b554054 100644 (file)
@@ -117,6 +117,7 @@ config USB_LAN78XX
        select PHYLIB
        select MICROCHIP_PHY
        select FIXED_PHY
+       select CRC32
        help
          This option adds support for Microchip LAN78XX based USB 2
          & USB 3 10/100/1000 Ethernet adapters.
index 741289e..ca007b8 100644 (file)
@@ -44,7 +44,7 @@ config ATH10K_SNOC
        tristate "Qualcomm ath10k SNOC support"
        depends on ATH10K
        depends on ARCH_QCOM || COMPILE_TEST
-       depends on QCOM_SCM || !QCOM_SCM #if QCOM_SCM=m this can't be =y
+       select QCOM_SCM
        select QCOM_QMI_HELPERS
        help
          This module adds support for integrated WCN3990 chip connected
index f720c0d..0ac1725 100644 (file)
@@ -36,6 +36,7 @@ LIST_HEAD(aliases_lookup);
 struct device_node *of_root;
 EXPORT_SYMBOL(of_root);
 struct device_node *of_chosen;
+EXPORT_SYMBOL(of_chosen);
 struct device_node *of_aliases;
 struct device_node *of_stdout;
 static const char *of_stdout_options;
index 0148687..dcefdb4 100644 (file)
@@ -62,14 +62,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
        struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
                                             hotplug_slot);
 
-       switch (zdev->state) {
-       case ZPCI_FN_STATE_STANDBY:
-               *value = 0;
-               break;
-       default:
-               *value = 1;
-               break;
-       }
+       *value = zpci_is_device_configured(zdev) ? 1 : 0;
        return 0;
 }
 
index 0f40943..260a06f 100644 (file)
@@ -1249,6 +1249,9 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev)
        bool check_children;
        u64 addr;
 
+       if (!dev->parent)
+               return NULL;
+
        down_read(&pci_acpi_companion_lookup_sem);
 
        adev = pci_acpi_find_companion_hook ?
index 32ea2a8..5ff4207 100644 (file)
@@ -3,7 +3,8 @@ if (ARCH_QCOM || COMPILE_TEST)
 
 config PINCTRL_MSM
        tristate "Qualcomm core pin controller driver"
-       depends on GPIOLIB && (QCOM_SCM || !QCOM_SCM) #if QCOM_SCM=m this can't be =y
+       depends on GPIOLIB
+       select QCOM_SCM
        select PINMUX
        select PINCONF
        select GENERIC_PINCONF
index 7646708..a916cd8 100644 (file)
@@ -98,7 +98,7 @@ mlxreg_io_get_reg(void *regmap, struct mlxreg_core_data *data, u32 in_val,
                        if (ret)
                                goto access_error;
 
-                       *regval |= rol32(val, regsize * i);
+                       *regval |= rol32(val, regsize * i * 8);
                }
        }
 
@@ -141,7 +141,7 @@ mlxreg_io_attr_store(struct device *dev, struct device_attribute *attr,
                return -EINVAL;
 
        /* Convert buffer to input value. */
-       ret = kstrtou32(buf, len, &input_val);
+       ret = kstrtou32(buf, 0, &input_val);
        if (ret)
                return ret;
 
index d6a7c89..fc95620 100644 (file)
@@ -476,6 +476,7 @@ static const struct acpi_device_id amd_pmc_acpi_ids[] = {
        {"AMDI0006", 0},
        {"AMDI0007", 0},
        {"AMD0004", 0},
+       {"AMD0005", 0},
        { }
 };
 MODULE_DEVICE_TABLE(acpi, amd_pmc_acpi_ids);
index 42513ea..2fffa57 100644 (file)
@@ -167,6 +167,7 @@ config DELL_WMI
 config DELL_WMI_PRIVACY
        bool "Dell WMI Hardware Privacy Support"
        depends on LEDS_TRIGGER_AUDIO = y || DELL_WMI = LEDS_TRIGGER_AUDIO
+       depends on DELL_WMI
        help
          This option adds integration with the "Dell Hardware Privacy"
          feature of Dell laptops to the dell-wmi driver.
index d53634c..658bab4 100644 (file)
@@ -141,6 +141,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
 
 static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE AX V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
index 379560f..e03943e 100644 (file)
@@ -42,12 +42,20 @@ static void update_sar_data(struct wwan_sar_context *context)
 
        if (config->device_mode_info &&
            context->sar_data.device_mode < config->total_dev_mode) {
-               struct wwan_device_mode_info *dev_mode =
-                       &config->device_mode_info[context->sar_data.device_mode];
-
-               context->sar_data.antennatable_index = dev_mode->antennatable_index;
-               context->sar_data.bandtable_index = dev_mode->bandtable_index;
-               context->sar_data.sartable_index = dev_mode->sartable_index;
+               int itr = 0;
+
+               for (itr = 0; itr < config->total_dev_mode; itr++) {
+                       if (context->sar_data.device_mode ==
+                               config->device_mode_info[itr].device_mode) {
+                               struct wwan_device_mode_info *dev_mode =
+                               &config->device_mode_info[itr];
+
+                               context->sar_data.antennatable_index = dev_mode->antennatable_index;
+                               context->sar_data.bandtable_index = dev_mode->bandtable_index;
+                               context->sar_data.sartable_index = dev_mode->sartable_index;
+                               break;
+                       }
+               }
        }
 }
 
@@ -305,7 +313,6 @@ static struct platform_driver sar_driver = {
        .remove = sar_remove,
        .driver = {
                .name = DRVNAME,
-               .owner = THIS_MODULE,
                .acpi_match_table = ACPI_PTR(sar_device_ids)
        }
 };
@@ -313,4 +320,4 @@ module_platform_driver(sar_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Platform device driver for INTEL MODEM BIOS SAR");
-MODULE_AUTHOR("Shravan S <s.shravan@intel.com>");
+MODULE_AUTHOR("Shravan Sudhakar <s.shravan@intel.com>");
index 9fe0a25..e59d79c 100644 (file)
@@ -401,7 +401,7 @@ int skl_int3472_discrete_remove(struct platform_device *pdev)
 
        gpiod_remove_lookup_table(&int3472->gpios);
 
-       if (int3472->clock.ena_gpio)
+       if (int3472->clock.cl)
                skl_int3472_unregister_clock(int3472);
 
        gpiod_put(int3472->clock.ena_gpio);
index bfa0cc2..7cc9089 100644 (file)
@@ -75,7 +75,7 @@ struct intel_scu_ipc_dev {
 #define IPC_READ_BUFFER                0x90
 
 /* Timeout in jiffies */
-#define IPC_TIMEOUT            (5 * HZ)
+#define IPC_TIMEOUT            (10 * HZ)
 
 static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
 static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
@@ -232,7 +232,7 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
 /* Wait till scu status is busy */
 static inline int busy_loop(struct intel_scu_ipc_dev *scu)
 {
-       unsigned long end = jiffies + msecs_to_jiffies(IPC_TIMEOUT);
+       unsigned long end = jiffies + IPC_TIMEOUT;
 
        do {
                u32 status;
@@ -247,7 +247,7 @@ static inline int busy_loop(struct intel_scu_ipc_dev *scu)
        return -ETIMEDOUT;
 }
 
-/* Wait till ipc ioc interrupt is received or timeout in 3 HZ */
+/* Wait till ipc ioc interrupt is received or timeout in 10 HZ */
 static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
 {
        int status;
index d0096cd..4991054 100644 (file)
@@ -31,10 +31,10 @@ int kvm_arch_ptp_init(void)
 
        ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
                             KVM_CLOCK_PAIRING_WALLCLOCK);
-       if (ret == -KVM_ENOSYS || ret == -KVM_EOPNOTSUPP)
+       if (ret == -KVM_ENOSYS)
                return -ENODEV;
 
-       return 0;
+       return ret;
 }
 
 int kvm_arch_ptp_get_clock(struct timespec64 *ts)
index b4cb5fb..0cc62c1 100644 (file)
@@ -1776,7 +1776,7 @@ int acornscsi_reconnect_finish(AS_Host *host)
        host->scsi.disconnectable = 0;
        if (host->SCpnt->device->id  == host->scsi.reconnected.target &&
            host->SCpnt->device->lun == host->scsi.reconnected.lun &&
-           scsi_cmd_to_tag(host->SCpnt) == host->scsi.reconnected.tag) {
+           scsi_cmd_to_rq(host->SCpnt)->tag == host->scsi.reconnected.tag) {
 #if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
            DBG(host->SCpnt, printk("scsi%d.%c: reconnected",
                    host->host->host_no, acornscsi_target(host)));
index 40fb3a7..cf2e41d 100644 (file)
@@ -32,7 +32,7 @@ efct_scsi_io_alloc(struct efct_node *node)
        struct efct *efct;
        struct efct_xport *xport;
        struct efct_io *io;
-       unsigned long flags = 0;
+       unsigned long flags;
 
        efct = node->efct;
 
@@ -44,7 +44,6 @@ efct_scsi_io_alloc(struct efct_node *node)
        if (!io) {
                efc_log_err(efct, "IO alloc Failed\n");
                atomic_add_return(1, &xport->io_alloc_failed_count);
-               spin_unlock_irqrestore(&node->active_ios_lock, flags);
                return NULL;
        }
 
index 4683c18..5bc91d3 100644 (file)
@@ -2281,11 +2281,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
                return FAILED;
        }
 
-       conn = session->leadconn;
-       iscsi_get_conn(conn->cls_conn);
-       conn->eh_abort_cnt++;
-       age = session->age;
-
        spin_lock(&session->back_lock);
        task = (struct iscsi_task *)sc->SCp.ptr;
        if (!task || !task->sc) {
@@ -2293,8 +2288,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
                ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
 
                spin_unlock(&session->back_lock);
-               goto success;
+               spin_unlock_bh(&session->frwd_lock);
+               mutex_unlock(&session->eh_mutex);
+               return SUCCESS;
        }
+
+       conn = session->leadconn;
+       iscsi_get_conn(conn->cls_conn);
+       conn->eh_abort_cnt++;
+       age = session->age;
+
        ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
        __iscsi_get_task(task);
        spin_unlock(&session->back_lock);
index 78ce38d..026a119 100644 (file)
@@ -12292,12 +12292,12 @@ void
 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                     struct lpfc_iocbq *rspiocb)
 {
-       struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+       struct lpfc_nodelist *ndlp = NULL;
        IOCB_t *irsp = &rspiocb->iocb;
 
        /* ELS cmd tag <ulpIoTag> completes */
        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-                       "0139 Ignoring ELS cmd tag x%x completion Data: "
+                       "0139 Ignoring ELS cmd code x%x completion Data: "
                        "x%x x%x x%x\n",
                        irsp->ulpIoTag, irsp->ulpStatus,
                        irsp->un.ulpWord[4], irsp->ulpTimeout);
@@ -12305,10 +12305,13 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
         * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
         * if exchange is busy.
         */
-       if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
+       if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
+               ndlp = cmdiocb->context_un.ndlp;
                lpfc_ct_free_iocb(phba, cmdiocb);
-       else
+       } else {
+               ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
                lpfc_els_free_iocb(phba, cmdiocb);
+       }
 
        lpfc_nlp_put(ndlp);
 }
index 188de6f..95be7ec 100644 (file)
@@ -6377,27 +6377,6 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
        return retval;
 }
 
-struct ctm_info {
-       struct ufs_hba  *hba;
-       unsigned long   pending;
-       unsigned int    ncpl;
-};
-
-static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
-{
-       struct ctm_info *const ci = priv;
-       struct completion *c;
-
-       WARN_ON_ONCE(reserved);
-       if (test_bit(req->tag, &ci->pending))
-               return true;
-       ci->ncpl++;
-       c = req->end_io_data;
-       if (c)
-               complete(c);
-       return true;
-}
-
 /**
  * ufshcd_tmc_handler - handle task management function completion
  * @hba: per adapter instance
@@ -6408,18 +6387,24 @@ static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
  */
 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
 {
-       unsigned long flags;
-       struct request_queue *q = hba->tmf_queue;
-       struct ctm_info ci = {
-               .hba     = hba,
-       };
+       unsigned long flags, pending, issued;
+       irqreturn_t ret = IRQ_NONE;
+       int tag;
+
+       pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
 
        spin_lock_irqsave(hba->host->host_lock, flags);
-       ci.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
-       blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
+       issued = hba->outstanding_tasks & ~pending;
+       for_each_set_bit(tag, &issued, hba->nutmrs) {
+               struct request *req = hba->tmf_rqs[tag];
+               struct completion *c = req->end_io_data;
+
+               complete(c);
+               ret = IRQ_HANDLED;
+       }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-       return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
+       return ret;
 }
 
 /**
@@ -6542,9 +6527,9 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
        ufshcd_hold(hba, false);
 
        spin_lock_irqsave(host->host_lock, flags);
-       blk_mq_start_request(req);
 
        task_tag = req->tag;
+       hba->tmf_rqs[req->tag] = req;
        treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
 
        memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
@@ -6585,6 +6570,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
        }
 
        spin_lock_irqsave(hba->host->host_lock, flags);
+       hba->tmf_rqs[req->tag] = NULL;
        __clear_bit(task_tag, &hba->outstanding_tasks);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
@@ -9635,6 +9621,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
                err = PTR_ERR(hba->tmf_queue);
                goto free_tmf_tag_set;
        }
+       hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
+                                   sizeof(*hba->tmf_rqs), GFP_KERNEL);
+       if (!hba->tmf_rqs) {
+               err = -ENOMEM;
+               goto free_tmf_queue;
+       }
 
        /* Reset the attached device */
        ufshcd_device_reset(hba);
index f0da5d3..41f6e06 100644 (file)
@@ -828,6 +828,7 @@ struct ufs_hba {
 
        struct blk_mq_tag_set tmf_tag_set;
        struct request_queue *tmf_queue;
+       struct request **tmf_rqs;
 
        struct uic_command *active_uic_cmd;
        struct mutex uic_cmd_mutex;
index da19d79..78fd365 100644 (file)
@@ -7,6 +7,7 @@ thunderbolt-objs += usb4_port.o nvm.o retimer.o quirks.o
 thunderbolt-${CONFIG_ACPI} += acpi.o
 thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o
 thunderbolt-${CONFIG_USB4_KUNIT_TEST} += test.o
+CFLAGS_test.o += $(DISABLE_STRUCTLEAK_PLUGIN)
 
 thunderbolt_dma_test-${CONFIG_USB4_DMA_TEST} += dma_test.o
 obj-$(CONFIG_USB4_DMA_TEST) += thunderbolt_dma_test.o
index 8f143c0..f0bf01e 100644 (file)
@@ -618,10 +618,8 @@ static int __init xenboot_console_setup(struct console *console, char *string)
 {
        static struct xencons_info xenboot;
 
-       if (xen_initial_domain())
+       if (xen_initial_domain() || !xen_pv_domain())
                return 0;
-       if (!xen_pv_domain())
-               return -ENODEV;
 
        return xencons_info_pv_init(&xenboot, 0);
 }
@@ -632,17 +630,16 @@ static void xenboot_write_console(struct console *console, const char *string,
        unsigned int linelen, off = 0;
        const char *pos;
 
+       if (dom0_write_console(0, string, len) >= 0)
+               return;
+
        if (!xen_pv_domain()) {
                xen_hvm_early_write(0, string, len);
                return;
        }
 
-       dom0_write_console(0, string, len);
-
-       if (xen_initial_domain())
+       if (domU_write_console(0, "(early) ", 8) < 0)
                return;
-
-       domU_write_console(0, "(early) ", 8);
        while (off < len && NULL != (pos = strchr(string+off, '\n'))) {
                linelen = pos-string+off;
                if (off + linelen > len)
index 8b7bc10..f1d1006 100644 (file)
@@ -420,11 +420,16 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
        data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0);
        if (IS_ERR(data->phy)) {
                ret = PTR_ERR(data->phy);
-               /* Return -EINVAL if no usbphy is available */
-               if (ret == -ENODEV)
-                       data->phy = NULL;
-               else
-                       goto err_clk;
+               if (ret == -ENODEV) {
+                       data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0);
+                       if (IS_ERR(data->phy)) {
+                               ret = PTR_ERR(data->phy);
+                               if (ret == -ENODEV)
+                                       data->phy = NULL;
+                               else
+                                       goto err_clk;
+                       }
+               }
        }
 
        pdata.usb_phy = data->phy;
index 4e2f155..7b2e242 100644 (file)
@@ -340,6 +340,9 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
                        acm->iocount.overrun++;
                spin_unlock_irqrestore(&acm->read_lock, flags);
 
+               if (newctrl & ACM_CTRL_BRK)
+                       tty_flip_buffer_push(&acm->port);
+
                if (difference)
                        wake_up_all(&acm->wioctl);
 
@@ -475,11 +478,16 @@ static int acm_submit_read_urbs(struct acm *acm, gfp_t mem_flags)
 
 static void acm_process_read_urb(struct acm *acm, struct urb *urb)
 {
+       unsigned long flags;
+
        if (!urb->actual_length)
                return;
 
+       spin_lock_irqsave(&acm->read_lock, flags);
        tty_insert_flip_string(&acm->port, urb->transfer_buffer,
                        urb->actual_length);
+       spin_unlock_irqrestore(&acm->read_lock, flags);
+
        tty_flip_buffer_push(&acm->port);
 }
 
index 35d5908..fdf79bc 100644 (file)
@@ -824,7 +824,7 @@ static struct usb_class_driver wdm_class = {
 };
 
 /* --- WWAN framework integration --- */
-#ifdef CONFIG_WWAN_CORE
+#ifdef CONFIG_WWAN
 static int wdm_wwan_port_start(struct wwan_port *port)
 {
        struct wdm_device *desc = wwan_port_get_drvdata(port);
@@ -963,11 +963,11 @@ static void wdm_wwan_rx(struct wdm_device *desc, int length)
        /* inbuf has been copied, it is safe to check for outstanding data */
        schedule_work(&desc->service_outs_intr);
 }
-#else /* CONFIG_WWAN_CORE */
+#else /* CONFIG_WWAN */
 static void wdm_wwan_init(struct wdm_device *desc) {}
 static void wdm_wwan_deinit(struct wdm_device *desc) {}
 static void wdm_wwan_rx(struct wdm_device *desc, int length) {}
-#endif /* CONFIG_WWAN_CORE */
+#endif /* CONFIG_WWAN */
 
 /* --- error handling --- */
 static void wdm_rxwork(struct work_struct *work)
index 5e8a04e..b856622 100644 (file)
@@ -6,8 +6,7 @@ config USB_COMMON
 
 config USB_LED_TRIG
        bool "USB LED Triggers"
-       depends on LEDS_CLASS && LEDS_TRIGGERS
-       select USB_COMMON
+       depends on LEDS_CLASS && USB_COMMON && LEDS_TRIGGERS
        help
          This option adds LED triggers for USB host and/or gadget activity.
 
index 804b505..4519d06 100644 (file)
@@ -4243,7 +4243,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
        }
 
 
-       usb_initialize_gadget(dwc->sysdev, dwc->gadget, dwc_gadget_release);
+       usb_initialize_gadget(dwc->dev, dwc->gadget, dwc_gadget_release);
        dev                             = &dwc->gadget->dev;
        dev->platform_data              = dwc;
        dwc->gadget->ops                = &dwc3_gadget_ops;
index be86456..ef55b8b 100644 (file)
@@ -674,11 +674,17 @@ static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
                ssize = uac2_opts->c_ssize;
        }
 
-       if (!is_playback && (uac2_opts->c_sync == USB_ENDPOINT_SYNC_ASYNC))
+       if (!is_playback && (uac2_opts->c_sync == USB_ENDPOINT_SYNC_ASYNC)) {
+         // Win10 requires max packet size + 1 frame
                srate = srate * (1000 + uac2_opts->fb_max) / 1000;
-
-       max_size_bw = num_channels(chmask) * ssize *
-               DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
+               // updated srate is always bigger, therefore DIV_ROUND_UP always yields +1
+               max_size_bw = num_channels(chmask) * ssize *
+                       (DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1))));
+       } else {
+               // adding 1 frame provision for Win10
+               max_size_bw = num_channels(chmask) * ssize *
+                       (DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1))) + 1);
+       }
        ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw,
                                                    max_size_ep));
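The comments above encode a small arithmetic argument: for asynchronous capture the feedback headroom (fb_max) already pushes srate past the next millisecond boundary, so DIV_ROUND_UP supplies the extra frame on its own, while every other endpoint gets the +1 frame added explicitly. A standalone sketch with made-up example values (48 kHz, stereo, 16-bit, full speed, fb_max of 5) shows both paths producing the same packet size.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* hypothetical values: 48 kHz, 2 channels, 2 bytes/sample,
	 * full speed (factor 1000, bInterval 1), fb_max = 5 */
	unsigned int srate = 48000, channels = 2, ssize = 2;
	unsigned int factor = 1000, bInterval = 1, fb_max = 5;
	unsigned int per_interval = factor / (1 << (bInterval - 1));

	/* async capture: bump the rate first, DIV_ROUND_UP then rounds up */
	unsigned int srate_async = srate * (1000 + fb_max) / 1000; /* 48240 */
	unsigned int async_bw = channels * ssize *
		DIV_ROUND_UP(srate_async, per_interval);            /* 4 * 49 */

	/* everything else: add one extra frame of provision explicitly */
	unsigned int other_bw = channels * ssize *
		(DIV_ROUND_UP(srate, per_interval) + 1);            /* 4 * 49 */

	printf("async=%u other=%u bytes per packet\n", async_bw, other_bw);
	return 0;
}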
 
index 575fa89..1bf494b 100644 (file)
@@ -1787,7 +1787,6 @@ static int tegra_xusb_remove(struct platform_device *pdev)
        return 0;
 }
 
-#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP)
 static bool xhci_hub_ports_suspended(struct xhci_hub *hub)
 {
        struct device *dev = hub->hcd->self.controller;
@@ -2102,7 +2101,7 @@ out:
        return err;
 }
 
-static int tegra_xusb_suspend(struct device *dev)
+static __maybe_unused int tegra_xusb_suspend(struct device *dev)
 {
        struct tegra_xusb *tegra = dev_get_drvdata(dev);
        int err;
@@ -2144,7 +2143,7 @@ out:
        return err;
 }
 
-static int tegra_xusb_resume(struct device *dev)
+static __maybe_unused int tegra_xusb_resume(struct device *dev)
 {
        struct tegra_xusb *tegra = dev_get_drvdata(dev);
        int err;
@@ -2174,10 +2173,8 @@ static int tegra_xusb_resume(struct device *dev)
 
        return 0;
 }
-#endif
 
-#ifdef CONFIG_PM
-static int tegra_xusb_runtime_suspend(struct device *dev)
+static __maybe_unused int tegra_xusb_runtime_suspend(struct device *dev)
 {
        struct tegra_xusb *tegra = dev_get_drvdata(dev);
        int ret;
@@ -2190,7 +2187,7 @@ static int tegra_xusb_runtime_suspend(struct device *dev)
        return ret;
 }
 
-static int tegra_xusb_runtime_resume(struct device *dev)
+static __maybe_unused int tegra_xusb_runtime_resume(struct device *dev)
 {
        struct tegra_xusb *tegra = dev_get_drvdata(dev);
        int err;
@@ -2201,7 +2198,6 @@ static int tegra_xusb_runtime_resume(struct device *dev)
 
        return err;
 }
-#endif
 
 static const struct dev_pm_ops tegra_xusb_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra_xusb_runtime_suspend,
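Dropping the #ifdef CONFIG_PM / CONFIG_PM_SLEEP guards and marking the callbacks __maybe_unused lets them always compile, while the SET_*_PM_OPS macros decide at build time whether they are actually referenced. A rough userspace sketch of the same idea, assuming GCC/Clang; PM_ENABLED is a made-up stand-in for the Kconfig option.

#include <stdio.h>

/* __maybe_unused expands to this attribute in the kernel; PM_ENABLED is
 * purely illustrative and plays the role of CONFIG_PM here. */
#define __maybe_unused __attribute__((__unused__))
#define PM_ENABLED 0

static __maybe_unused int demo_suspend(void)
{
	puts("suspend");
	return 0;
}

int main(void)
{
#if PM_ENABLED
	return demo_suspend();  /* referenced only when PM support is built in */
#else
	return 0;               /* unreferenced, yet no -Wunused-function warning */
#endif
}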
index 9858716..c15eec9 100644 (file)
@@ -696,7 +696,7 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci)
                tcpm_pd_receive(tcpci->port, &msg);
        }
 
-       if (status & TCPC_ALERT_EXTENDED_STATUS) {
+       if (tcpci->data->vbus_vsafe0v && (status & TCPC_ALERT_EXTENDED_STATUS)) {
                ret = regmap_read(tcpci->regmap, TCPC_EXTENDED_STATUS, &raw);
                if (!ret && (raw & TCPC_EXTENDED_STATUS_VSAFE0V))
                        tcpm_vbus_change(tcpci->port);
index a4d3720..7f2f3ff 100644 (file)
@@ -4876,6 +4876,7 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
                        tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
                break;
        case SRC_ATTACHED:
+       case SRC_STARTUP:
        case SRC_SEND_CAPABILITIES:
        case SRC_READY:
                if (tcpm_port_is_disconnected(port) ||
index 21b3ae2..ea4cc0a 100644 (file)
@@ -625,10 +625,6 @@ static int tps6598x_probe(struct i2c_client *client)
        if (ret < 0)
                return ret;
 
-       fwnode = device_get_named_child_node(&client->dev, "connector");
-       if (!fwnode)
-               return -ENODEV;
-
        /*
         * This fwnode has a "compatible" property, but is never populated as a
         * struct device. Instead we simply parse it to read the properties.
@@ -636,7 +632,9 @@ static int tps6598x_probe(struct i2c_client *client)
         * with existing DT files, we work around this by deleting any
         * fwnode_links to/from this fwnode.
         */
-       fw_devlink_purge_absent_suppliers(fwnode);
+       fwnode = device_get_named_child_node(&client->dev, "connector");
+       if (fwnode)
+               fw_devlink_purge_absent_suppliers(fwnode);
 
        tps->role_sw = fwnode_usb_role_switch_get(fwnode);
        if (IS_ERR(tps->role_sw)) {
index b26b79d..6ed5e60 100644 (file)
@@ -2193,8 +2193,9 @@ config FB_HYPERV
          This framebuffer driver supports Microsoft Hyper-V Synthetic Video.
 
 config FB_SIMPLE
-       bool "Simple framebuffer support"
-       depends on (FB = y) && !DRM_SIMPLEDRM
+       tristate "Simple framebuffer support"
+       depends on FB
+       depends on !DRM_SIMPLEDRM
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
index c5b99a4..6b4d5a7 100644 (file)
@@ -1267,7 +1267,7 @@ static struct platform_device *gbefb_device;
 static int __init gbefb_init(void)
 {
        int ret = platform_driver_register(&gbefb_driver);
-       if (!ret) {
+       if (IS_ENABLED(CONFIG_SGI_IP32) && !ret) {
                gbefb_device = platform_device_alloc("gbefb", 0);
                if (gbefb_device) {
                        ret = platform_device_add(gbefb_device);
index 22f5aff..1b2c3ac 100644 (file)
@@ -241,7 +241,7 @@ config XEN_PRIVCMD
 
 config XEN_ACPI_PROCESSOR
        tristate "Xen ACPI processor"
-       depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
+       depends on XEN && XEN_PV_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
        default m
        help
          This ACPI processor uploads Power Management information to the Xen
@@ -259,7 +259,7 @@ config XEN_ACPI_PROCESSOR
 
 config XEN_MCE_LOG
        bool "Xen platform mcelog"
-       depends on XEN_DOM0 && X86_MCE
+       depends on XEN_PV_DOM0 && X86_MCE
        help
          Allow kernel fetching MCE error from Xen platform and
          converting it into Linux mcelog format for mcelog tools
index 43ebfe3..3a50f09 100644 (file)
@@ -491,12 +491,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 }
 
 /*
- * Stop waiting if either state is not BP_EAGAIN and ballooning action is
- * needed, or if the credit has changed while state is BP_EAGAIN.
+ * Stop waiting if either state is BP_DONE and ballooning action is
+ * needed, or if the credit has changed while state is not BP_DONE.
  */
 static bool balloon_thread_cond(enum bp_state state, long credit)
 {
-       if (state != BP_EAGAIN)
+       if (state == BP_DONE)
                credit = 0;
 
        return current_credit() != credit || kthread_should_stop();
@@ -516,10 +516,19 @@ static int balloon_thread(void *unused)
 
        set_freezable();
        for (;;) {
-               if (state == BP_EAGAIN)
-                       timeout = balloon_stats.schedule_delay * HZ;
-               else
+               switch (state) {
+               case BP_DONE:
+               case BP_ECANCELED:
                        timeout = 3600 * HZ;
+                       break;
+               case BP_EAGAIN:
+                       timeout = balloon_stats.schedule_delay * HZ;
+                       break;
+               case BP_WAIT:
+                       timeout = HZ;
+                       break;
+               }
+
                credit = current_credit();
 
                wait_event_freezable_timeout(balloon_thread_wq,
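The switch gives each balloon state its own poll interval instead of the old two-way split; notably BP_WAIT now re-polls after one HZ rather than sleeping for an hour. A tiny standalone sketch of that state-to-timeout mapping, with HZ fixed to 1 purely for illustration.

#include <stdio.h>

enum bp_state { BP_DONE, BP_WAIT, BP_EAGAIN, BP_ECANCELED };
#define HZ 1   /* illustrative only */

static long pick_timeout(enum bp_state state, long schedule_delay)
{
	switch (state) {
	case BP_DONE:
	case BP_ECANCELED:
		return 3600 * HZ;           /* nothing to do, sleep for an hour */
	case BP_EAGAIN:
		return schedule_delay * HZ; /* back off and retry later */
	case BP_WAIT:
		return HZ;                  /* resources expected shortly, poll soon */
	}
	return HZ;
}

int main(void)
{
	printf("%ld %ld %ld\n", pick_timeout(BP_DONE, 7),
	       pick_timeout(BP_EAGAIN, 7), pick_timeout(BP_WAIT, 7));
	return 0;
}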
index 720a7b7..3369734 100644 (file)
@@ -257,7 +257,7 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
        LIST_HEAD(pagelist);
        struct mmap_gfn_state state;
 
-       /* We only support privcmd_ioctl_mmap_batch for auto translated. */
+       /* We only support privcmd_ioctl_mmap_batch for non-auto-translated. */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -ENOSYS;
 
@@ -420,7 +420,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
        int rc;
        struct page **pages;
 
-       pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
+       pages = kvcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
        if (pages == NULL)
                return -ENOMEM;
 
@@ -428,7 +428,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
        if (rc != 0) {
                pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
                        numpgs, rc);
-               kfree(pages);
+               kvfree(pages);
                return -ENOMEM;
        }
        BUG_ON(vma->vm_private_data != NULL);
@@ -803,21 +803,21 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
                unsigned int domid =
                        (xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
                        DOMID_SELF : kdata.dom;
-               int num;
+               int num, *errs = (int *)pfns;
 
+               BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));
                num = xen_remap_domain_mfn_array(vma,
                                                 kdata.addr & PAGE_MASK,
-                                                pfns, kdata.num, (int *)pfns,
+                                                pfns, kdata.num, errs,
                                                 vma->vm_page_prot,
-                                                domid,
-                                                vma->vm_private_data);
+                                                domid);
                if (num < 0)
                        rc = num;
                else if (num != kdata.num) {
                        unsigned int i;
 
                        for (i = 0; i < num; i++) {
-                               rc = pfns[i];
+                               rc = errs[i];
                                if (rc < 0)
                                        break;
                        }
@@ -912,7 +912,7 @@ static void privcmd_close(struct vm_area_struct *vma)
        else
                pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
                        numpgs, rc);
-       kfree(pages);
+       kvfree(pages);
 }
 
 static vm_fault_t privcmd_fault(struct vm_fault *vmf)
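The errs = (int *)pfns trick above reuses the pfn buffer for per-entry error codes, and the new BUILD_BUG_ON documents the assumption that makes it safe: an error code can never be wider than a pfn slot. A userspace sketch of the same pattern, using _Static_assert in place of BUILD_BUG_ON and a made-up xen_pfn_t typedef.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t xen_pfn_t;   /* illustrative stand-in for the real typedef */

int main(void)
{
	xen_pfn_t pfns[4] = { 100, 101, 102, 103 };
	int *errs = (int *)pfns;  /* reuse the same storage for error codes */

	/* compile-time guard, same spirit as the BUILD_BUG_ON above */
	_Static_assert(sizeof(int) <= sizeof(xen_pfn_t),
		       "error codes must fit in the pfn array");

	/* pretend the mapping call wrote back one result per entry */
	errs[0] = 0; errs[1] = -22; errs[2] = 0; errs[3] = 0;

	for (int i = 0; i < 4; i++)
		if (errs[i] < 0)
			printf("entry %d failed with %d\n", i, errs[i]);
	return 0;
}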
index dff2c8a..c0cebcf 100644 (file)
@@ -3030,7 +3030,7 @@ struct btrfs_dir_item *
 btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root,
                            struct btrfs_path *path, u64 dir,
-                           u64 objectid, const char *name, int name_len,
+                           u64 index, const char *name, int name_len,
                            int mod);
 struct btrfs_dir_item *
 btrfs_search_dir_index_item(struct btrfs_root *root,
index f1274d5..7721ce0 100644 (file)
@@ -190,9 +190,20 @@ static struct btrfs_dir_item *btrfs_lookup_match_dir(
 }
 
 /*
- * lookup a directory item based on name.  'dir' is the objectid
- * we're searching in, and 'mod' tells us if you plan on deleting the
- * item (use mod < 0) or changing the options (use mod > 0)
+ * Look up a directory item by name.
+ *
+ * @trans:     The transaction handle to use. Can be NULL if @mod is 0.
+ * @root:      The root of the target tree.
+ * @path:      Path to use for the search.
+ * @dir:       The inode number (objectid) of the directory.
+ * @name:      The name associated with the directory entry we are looking for.
+ * @name_len:  The length of the name.
+ * @mod:       Used to indicate if the tree search is meant for a read-only
+ *             lookup, for a modification lookup or for a deletion lookup, so
+ *             its value should be 0, 1 or -1, respectively.
+ *
+ * Returns: NULL if the dir item does not exist, an error pointer if an error
+ * happened, or a pointer to a dir item if a dir item exists for the given name.
  */
 struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
                                             struct btrfs_root *root,
@@ -273,27 +284,42 @@ out:
 }
 
 /*
- * lookup a directory item based on index.  'dir' is the objectid
- * we're searching in, and 'mod' tells us if you plan on deleting the
- * item (use mod < 0) or changing the options (use mod > 0)
+ * Look up a directory index item by name and index number.
  *
- * The name is used to make sure the index really points to the name you were
- * looking for.
+ * @trans:     The transaction handle to use. Can be NULL if @mod is 0.
+ * @root:      The root of the target tree.
+ * @path:      Path to use for the search.
+ * @dir:       The inode number (objectid) of the directory.
+ * @index:     The index number.
+ * @name:      The name associated with the directory entry we are looking for.
+ * @name_len:  The length of the name.
+ * @mod:       Used to indicate if the tree search is meant for a read-only
+ *             lookup, for a modification lookup or for a deletion lookup, so
+ *             its value should be 0, 1 or -1, respectively.
+ *
+ * Returns: NULL if the dir index item does not exist, an error pointer if an
+ * error happened, or a pointer to a dir item if the dir index item exists and
+ * matches the criteria (name and index number).
  */
 struct btrfs_dir_item *
 btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root,
                            struct btrfs_path *path, u64 dir,
-                           u64 objectid, const char *name, int name_len,
+                           u64 index, const char *name, int name_len,
                            int mod)
 {
+       struct btrfs_dir_item *di;
        struct btrfs_key key;
 
        key.objectid = dir;
        key.type = BTRFS_DIR_INDEX_KEY;
-       key.offset = objectid;
+       key.offset = index;
 
-       return btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
+       di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
+       if (di == ERR_PTR(-ENOENT))
+               return NULL;
+
+       return di;
 }
 
 struct btrfs_dir_item *
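After this change btrfs_lookup_dir_index_item() has a three-valued contract: NULL for "no such item", an ERR_PTR() for a real failure, and a valid pointer otherwise, which is the IS_ERR()/NULL/found check the log-replay hunks further down adopt. A self-contained sketch of that calling convention; the ERR_PTR()/IS_ERR()/PTR_ERR() helpers are re-implemented here purely for illustration.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* minimal stand-ins for the kernel's error-pointer helpers */
static void *ERR_PTR(long err)      { return (void *)err; }
static int   IS_ERR(const void *p)  { return (uintptr_t)p >= (uintptr_t)-4095; }
static long  PTR_ERR(const void *p) { return (long)p; }

/* hypothetical lookup following the new convention:
 * NULL = not found, ERR_PTR() = hard error, anything else = the item */
static void *lookup(int what)
{
	static int item;

	if (what == 0)
		return NULL;
	if (what == 1)
		return ERR_PTR(-EIO);
	return &item;
}

int main(void)
{
	for (int what = 0; what < 3; what++) {
		void *di = lookup(what);

		if (IS_ERR(di))
			printf("error %ld\n", PTR_ERR(di));
		else if (!di)
			printf("not found\n");
		else
			printf("found\n");
	}
	return 0;
}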
index fc3da75..0ab456c 100644 (file)
@@ -4859,6 +4859,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
 out_free_delayed:
        btrfs_free_delayed_extent_op(extent_op);
 out_free_buf:
+       btrfs_tree_unlock(buf);
        free_extent_buffer(buf);
 out_free_reserved:
        btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
index 7ff5770..a176236 100644 (file)
@@ -734,8 +734,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
        if (args->start >= inode->disk_i_size && !args->replace_extent)
                modify_tree = 0;
 
-       update_refs = (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
-                      root == fs_info->tree_root);
+       update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
        while (1) {
                recow = 0;
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
@@ -2704,14 +2703,16 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
                                                 drop_args.bytes_found);
                if (ret != -ENOSPC) {
                        /*
-                        * When cloning we want to avoid transaction aborts when
-                        * nothing was done and we are attempting to clone parts
-                        * of inline extents, in such cases -EOPNOTSUPP is
-                        * returned by __btrfs_drop_extents() without having
-                        * changed anything in the file.
+                        * The only time we don't want to abort is if we are
+                        * attempting to clone a partial inline extent, in which
+                        * case we'll get EOPNOTSUPP.  However, if we aren't
+                        * cloning we need to abort no matter what, because if we
+                        * got EOPNOTSUPP via prealloc then we messed up and
+                        * need to abort.
                         */
-                       if (extent_info && !extent_info->is_new_extent &&
-                           ret && ret != -EOPNOTSUPP)
+                       if (ret &&
+                           (ret != -EOPNOTSUPP ||
+                            (extent_info && extent_info->is_new_extent)))
                                btrfs_abort_transaction(trans, ret);
                        break;
                }
index f7efc26..b415c5e 100644 (file)
@@ -939,9 +939,11 @@ out:
 }
 
 /*
- * helper function to see if a given name and sequence number found
- * in an inode back reference are already in a directory and correctly
- * point to this inode
+ * See if a given name and sequence number found in an inode back reference are
+ * already in a directory and correctly point to this inode.
+ *
+ * Returns: < 0 on error, 0 if the directory entry does not exist and 1 if it
+ * exists.
  */
 static noinline int inode_in_dir(struct btrfs_root *root,
                                 struct btrfs_path *path,
@@ -950,29 +952,34 @@ static noinline int inode_in_dir(struct btrfs_root *root,
 {
        struct btrfs_dir_item *di;
        struct btrfs_key location;
-       int match = 0;
+       int ret = 0;
 
        di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
                                         index, name, name_len, 0);
-       if (di && !IS_ERR(di)) {
+       if (IS_ERR(di)) {
+               ret = PTR_ERR(di);
+               goto out;
+       } else if (di) {
                btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
                if (location.objectid != objectid)
                        goto out;
-       } else
+       } else {
                goto out;
-       btrfs_release_path(path);
+       }
 
+       btrfs_release_path(path);
        di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
-       if (di && !IS_ERR(di)) {
-               btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
-               if (location.objectid != objectid)
-                       goto out;
-       } else
+       if (IS_ERR(di)) {
+               ret = PTR_ERR(di);
                goto out;
-       match = 1;
+       } else if (di) {
+               btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
+               if (location.objectid == objectid)
+                       ret = 1;
+       }
 out:
        btrfs_release_path(path);
-       return match;
+       return ret;
 }
 
 /*
@@ -1182,7 +1189,9 @@ next:
        /* look for a conflicting sequence number */
        di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
                                         ref_index, name, namelen, 0);
-       if (di && !IS_ERR(di)) {
+       if (IS_ERR(di)) {
+               return PTR_ERR(di);
+       } else if (di) {
                ret = drop_one_dir_item(trans, root, path, dir, di);
                if (ret)
                        return ret;
@@ -1192,7 +1201,9 @@ next:
        /* look for a conflicting name */
        di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
                                   name, namelen, 0);
-       if (di && !IS_ERR(di)) {
+       if (IS_ERR(di)) {
+               return PTR_ERR(di);
+       } else if (di) {
                ret = drop_one_dir_item(trans, root, path, dir, di);
                if (ret)
                        return ret;
@@ -1517,10 +1528,12 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
                if (ret)
                        goto out;
 
-               /* if we already have a perfect match, we're done */
-               if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
-                                       btrfs_ino(BTRFS_I(inode)), ref_index,
-                                       name, namelen)) {
+               ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
+                                  btrfs_ino(BTRFS_I(inode)), ref_index,
+                                  name, namelen);
+               if (ret < 0) {
+                       goto out;
+               } else if (ret == 0) {
                        /*
                         * look for a conflicting back reference in the
                         * metadata. if we find one we have to unlink that name
@@ -1580,6 +1593,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
                        if (ret)
                                goto out;
                }
+               /* Else, ret == 1, we already have a perfect match, we're done. */
 
                ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
                kfree(name);
@@ -1936,8 +1950,8 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
        struct btrfs_key log_key;
        struct inode *dir;
        u8 log_type;
-       int exists;
-       int ret = 0;
+       bool exists;
+       int ret;
        bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
        bool name_added = false;
 
@@ -1957,12 +1971,12 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
                   name_len);
 
        btrfs_dir_item_key_to_cpu(eb, di, &log_key);
-       exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
-       if (exists == 0)
-               exists = 1;
-       else
-               exists = 0;
+       ret = btrfs_lookup_inode(trans, root, path, &log_key, 0);
        btrfs_release_path(path);
+       if (ret < 0)
+               goto out;
+       exists = (ret == 0);
+       ret = 0;
 
        if (key->type == BTRFS_DIR_ITEM_KEY) {
                dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
@@ -1977,7 +1991,11 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
                ret = -EINVAL;
                goto out;
        }
-       if (IS_ERR_OR_NULL(dst_di)) {
+
+       if (IS_ERR(dst_di)) {
+               ret = PTR_ERR(dst_di);
+               goto out;
+       } else if (!dst_di) {
                /* we need a sequence number to insert, so we only
                 * do inserts for the BTRFS_DIR_INDEX_KEY types
                 */
@@ -2281,7 +2299,7 @@ again:
                                                     dir_key->offset,
                                                     name, name_len, 0);
                }
-               if (!log_di || log_di == ERR_PTR(-ENOENT)) {
+               if (!log_di) {
                        btrfs_dir_item_key_to_cpu(eb, di, &location);
                        btrfs_release_path(path);
                        btrfs_release_path(log_path);
@@ -3540,8 +3558,7 @@ out_unlock:
        if (err == -ENOSPC) {
                btrfs_set_log_full_commit(trans);
                err = 0;
-       } else if (err < 0 && err != -ENOENT) {
-               /* ENOENT can be returned if the entry hasn't been fsynced yet */
+       } else if (err < 0) {
                btrfs_abort_transaction(trans, err);
        }
 
index af086d3..48b18b4 100644 (file)
@@ -296,10 +296,12 @@ int ksmbd_conn_handler_loop(void *p)
                pdu_size = get_rfc1002_len(hdr_buf);
                ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
 
-               /* make sure we have enough to get to SMB header end */
-               if (!ksmbd_pdu_size_has_room(pdu_size)) {
-                       ksmbd_debug(CONN, "SMB request too short (%u bytes)\n",
-                                   pdu_size);
+               /*
+                * Check if pdu size is valid (min : smb header size,
+                * max : 0x00FFFFFF).
+                */
+               if (pdu_size < __SMB2_HEADER_STRUCTURE_SIZE ||
+                   pdu_size > MAX_STREAM_PROT_LEN) {
                        continue;
                }
 
index 49a5a3a..5b8f3e0 100644 (file)
@@ -12,7 +12,7 @@
 #include "unicode.h"
 #include "vfs_cache.h"
 
-#define KSMBD_VERSION  "3.1.9"
+#define KSMBD_VERSION  "3.4.2"
 
 extern int ksmbd_debug_types;
 
index 9aa46bb..9edd9c1 100644 (file)
@@ -80,18 +80,21 @@ static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = {
 };
 
 /*
- * Returns the pointer to the beginning of the data area. Length of the data
- * area and the offset to it (from the beginning of the smb are also returned.
+ * Set the length of the data area and its offset in the supplied arguments;
+ * if they are invalid, return an error.
  */
-static char *smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
+static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
+                                 struct smb2_hdr *hdr)
 {
+       int ret = 0;
+
        *off = 0;
        *len = 0;
 
        /* error requests do not have data area */
        if (hdr->Status && hdr->Status != STATUS_MORE_PROCESSING_REQUIRED &&
            (((struct smb2_err_rsp *)hdr)->StructureSize) == SMB2_ERROR_STRUCTURE_SIZE2_LE)
-               return NULL;
+               return ret;
 
        /*
         * Following commands have data areas so we have to get the location
@@ -165,69 +168,60 @@ static char *smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
        case SMB2_IOCTL:
                *off = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset);
                *len = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputCount);
-
                break;
        default:
                ksmbd_debug(SMB, "no length check for command\n");
                break;
        }
 
-       /*
-        * Invalid length or offset probably means data area is invalid, but
-        * we have little choice but to ignore the data area in this case.
-        */
        if (*off > 4096) {
-               ksmbd_debug(SMB, "offset %d too large, data area ignored\n",
-                           *off);
-               *len = 0;
-               *off = 0;
-       } else if (*off < 0) {
-               ksmbd_debug(SMB,
-                           "negative offset %d to data invalid ignore data area\n",
-                           *off);
-               *off = 0;
-               *len = 0;
-       } else if (*len < 0) {
-               ksmbd_debug(SMB,
-                           "negative data length %d invalid, data area ignored\n",
-                           *len);
-               *len = 0;
-       } else if (*len > 128 * 1024) {
-               ksmbd_debug(SMB, "data area larger than 128K: %d\n", *len);
-               *len = 0;
+               ksmbd_debug(SMB, "offset %d too large\n", *off);
+               ret = -EINVAL;
+       } else if ((u64)*off + *len > MAX_STREAM_PROT_LEN) {
+               ksmbd_debug(SMB, "Request is larger than maximum stream protocol length(%u): %llu\n",
+                           MAX_STREAM_PROT_LEN, (u64)*off + *len);
+               ret = -EINVAL;
        }
 
-       /* return pointer to beginning of data area, ie offset from SMB start */
-       if ((*off != 0) && (*len != 0))
-               return (char *)hdr + *off;
-       else
-               return NULL;
+       return ret;
 }
 
 /*
  * Calculate the size of the SMB message based on the fixed header
  * portion, the number of word parameters and the data portion of the message.
  */
-static unsigned int smb2_calc_size(void *buf)
+static int smb2_calc_size(void *buf, unsigned int *len)
 {
        struct smb2_pdu *pdu = (struct smb2_pdu *)buf;
        struct smb2_hdr *hdr = &pdu->hdr;
-       int offset; /* the offset from the beginning of SMB to data area */
-       int data_length; /* the length of the variable length data area */
+       unsigned int offset; /* the offset from the beginning of SMB to data area */
+       unsigned int data_length; /* the length of the variable length data area */
+       int ret;
+
        /* Structure Size has already been checked to make sure it is 64 */
-       int len = le16_to_cpu(hdr->StructureSize);
+       *len = le16_to_cpu(hdr->StructureSize);
 
        /*
         * StructureSize2, ie length of fixed parameter area has already
         * been checked to make sure it is the correct length.
         */
-       len += le16_to_cpu(pdu->StructureSize2);
+       *len += le16_to_cpu(pdu->StructureSize2);
+       /*
+        * StructureSize2 of smb2_lock pdu is set to 48, indicating
+        * StructureSize2 of an smb2_lock pdu is set to 48, indicating
+        * the size of an smb2 lock request with a single smb2_lock_element
+        * regardless of the number of locks. Subtract a single
+        * smb2_lock_element for a correct buffer size check.
+       if (hdr->Command == SMB2_LOCK)
+               *len -= sizeof(struct smb2_lock_element);
 
        if (has_smb2_data_area[le16_to_cpu(hdr->Command)] == false)
                goto calc_size_exit;
 
-       smb2_get_data_area_len(&offset, &data_length, hdr);
-       ksmbd_debug(SMB, "SMB2 data length %d offset %d\n", data_length,
+       ret = smb2_get_data_area_len(&offset, &data_length, hdr);
+       if (ret)
+               return ret;
+       ksmbd_debug(SMB, "SMB2 data length %u offset %u\n", data_length,
                    offset);
 
        if (data_length > 0) {
@@ -237,16 +231,19 @@ static unsigned int smb2_calc_size(void *buf)
                 * for some commands, typically those with odd StructureSize,
                 * so we must add one to the calculation.
                 */
-               if (offset + 1 < len)
+               if (offset + 1 < *len) {
                        ksmbd_debug(SMB,
-                                   "data area offset %d overlaps SMB2 header %d\n",
-                                   offset + 1, len);
-               else
-                       len = offset + data_length;
+                                   "data area offset %d overlaps SMB2 header %u\n",
+                                   offset + 1, *len);
+                       return -EINVAL;
+               }
+
+               *len = offset + data_length;
        }
+
 calc_size_exit:
-       ksmbd_debug(SMB, "SMB2 len %d\n", len);
-       return len;
+       ksmbd_debug(SMB, "SMB2 len %u\n", *len);
+       return 0;
 }
 
 static inline int smb2_query_info_req_len(struct smb2_query_info_req *h)
@@ -391,9 +388,11 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
                return 1;
        }
 
-       clc_len = smb2_calc_size(hdr);
+       if (smb2_calc_size(hdr, &clc_len))
+               return 1;
+
        if (len != clc_len) {
-               /* server can return one byte more due to implied bcc[0] */
+               /* client can return one byte more due to implied bcc[0] */
                if (clc_len == len + 1)
                        return 0;
 
@@ -418,9 +417,6 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
                        return 0;
                }
 
-               if (command == SMB2_LOCK_HE && len == 88)
-                       return 0;
-
                ksmbd_debug(SMB,
                            "cli req too short, len %d not %d. cmd:%d mid:%llu\n",
                            len, clc_len, command,
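One detail worth noting in smb2_get_data_area_len() is the (u64)*off + *len comparison: widening before the addition keeps a hostile 32-bit length from wrapping the sum back under MAX_STREAM_PROT_LEN. A small standalone demonstration with made-up wire values.

#include <stdint.h>
#include <stdio.h>

#define MAX_STREAM_PROT_LEN 0x00FFFFFFu

int main(void)
{
	uint32_t off = 4096, len = 0xFFFFF000u;  /* hostile length from the wire */

	/* 32-bit addition wraps around to 0 and slips under the limit... */
	printf("32-bit check flags it: %d\n", off + len > MAX_STREAM_PROT_LEN);

	/* ...widening first, as the patch does, catches it */
	printf("64-bit check flags it: %d\n",
	       (uint64_t)off + len > MAX_STREAM_PROT_LEN);
	return 0;
}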
index 1974738..b06456e 100644 (file)
@@ -187,11 +187,6 @@ static struct smb_version_cmds smb2_0_server_cmds[NUMBER_OF_SMB2_COMMANDS] = {
        [SMB2_CHANGE_NOTIFY_HE] =       { .proc = smb2_notify},
 };
 
-int init_smb2_0_server(struct ksmbd_conn *conn)
-{
-       return -EOPNOTSUPP;
-}
-
 /**
  * init_smb2_1_server() - initialize a smb server connection with smb2.1
  *                     command dispatcher
index dcf9077..005aa93 100644 (file)
@@ -236,9 +236,6 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
 
        if (conn->need_neg == false)
                return -EINVAL;
-       if (!(conn->dialect >= SMB20_PROT_ID &&
-             conn->dialect <= SMB311_PROT_ID))
-               return -EINVAL;
 
        rsp_hdr = work->response_buf;
 
@@ -1166,13 +1163,6 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
        case SMB21_PROT_ID:
                init_smb2_1_server(conn);
                break;
-       case SMB20_PROT_ID:
-               rc = init_smb2_0_server(conn);
-               if (rc) {
-                       rsp->hdr.Status = STATUS_NOT_SUPPORTED;
-                       goto err_out;
-               }
-               break;
        case SMB2X_PROT_ID:
        case BAD_PROT_ID:
        default:
@@ -1191,11 +1181,9 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
        rsp->MaxReadSize = cpu_to_le32(conn->vals->max_read_size);
        rsp->MaxWriteSize = cpu_to_le32(conn->vals->max_write_size);
 
-       if (conn->dialect > SMB20_PROT_ID) {
-               memcpy(conn->ClientGUID, req->ClientGUID,
-                      SMB2_CLIENT_GUID_SIZE);
-               conn->cli_sec_mode = le16_to_cpu(req->SecurityMode);
-       }
+       memcpy(conn->ClientGUID, req->ClientGUID,
+                       SMB2_CLIENT_GUID_SIZE);
+       conn->cli_sec_mode = le16_to_cpu(req->SecurityMode);
 
        rsp->StructureSize = cpu_to_le16(65);
        rsp->DialectRevision = cpu_to_le16(conn->dialect);
@@ -1537,11 +1525,9 @@ binding_session:
                }
        }
 
-       if (conn->dialect > SMB20_PROT_ID) {
-               if (!ksmbd_conn_lookup_dialect(conn)) {
-                       pr_err("fail to verify the dialect\n");
-                       return -ENOENT;
-               }
+       if (!ksmbd_conn_lookup_dialect(conn)) {
+               pr_err("fail to verify the dialect\n");
+               return -ENOENT;
        }
        return 0;
 }
@@ -1623,11 +1609,9 @@ static int krb5_authenticate(struct ksmbd_work *work)
                }
        }
 
-       if (conn->dialect > SMB20_PROT_ID) {
-               if (!ksmbd_conn_lookup_dialect(conn)) {
-                       pr_err("fail to verify the dialect\n");
-                       return -ENOENT;
-               }
+       if (!ksmbd_conn_lookup_dialect(conn)) {
+               pr_err("fail to verify the dialect\n");
+               return -ENOENT;
        }
        return 0;
 }
@@ -5499,7 +5483,6 @@ static int set_file_basic_info(struct ksmbd_file *fp,
                               struct ksmbd_share_config *share)
 {
        struct iattr attrs;
-       struct timespec64 ctime;
        struct file *filp;
        struct inode *inode;
        struct user_namespace *user_ns;
@@ -5521,13 +5504,11 @@ static int set_file_basic_info(struct ksmbd_file *fp,
                attrs.ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
        }
 
-       if (file_info->ChangeTime) {
+       attrs.ia_valid |= ATTR_CTIME;
+       if (file_info->ChangeTime)
                attrs.ia_ctime = ksmbd_NTtimeToUnix(file_info->ChangeTime);
-               ctime = attrs.ia_ctime;
-               attrs.ia_valid |= ATTR_CTIME;
-       } else {
-               ctime = inode->i_ctime;
-       }
+       else
+               attrs.ia_ctime = inode->i_ctime;
 
        if (file_info->LastWriteTime) {
                attrs.ia_mtime = ksmbd_NTtimeToUnix(file_info->LastWriteTime);
@@ -5573,11 +5554,9 @@ static int set_file_basic_info(struct ksmbd_file *fp,
                        return -EACCES;
 
                inode_lock(inode);
+               inode->i_ctime = attrs.ia_ctime;
+               attrs.ia_valid &= ~ATTR_CTIME;
                rc = notify_change(user_ns, dentry, &attrs, NULL);
-               if (!rc) {
-                       inode->i_ctime = ctime;
-                       mark_inode_dirty(inode);
-               }
                inode_unlock(inode);
        }
        return rc;
@@ -8411,20 +8390,18 @@ int smb3_decrypt_req(struct ksmbd_work *work)
        struct smb2_hdr *hdr;
        unsigned int pdu_length = get_rfc1002_len(buf);
        struct kvec iov[2];
-       unsigned int buf_data_size = pdu_length + 4 -
+       int buf_data_size = pdu_length + 4 -
                sizeof(struct smb2_transform_hdr);
        struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
        int rc = 0;
 
-       if (pdu_length + 4 <
-           sizeof(struct smb2_transform_hdr) + sizeof(struct smb2_hdr)) {
+       if (buf_data_size < sizeof(struct smb2_hdr)) {
                pr_err("Transform message is too small (%u)\n",
                       pdu_length);
                return -ECONNABORTED;
        }
 
-       if (pdu_length + 4 <
-           le32_to_cpu(tr_hdr->OriginalMessageSize) + sizeof(struct smb2_transform_hdr)) {
+       if (buf_data_size < le32_to_cpu(tr_hdr->OriginalMessageSize)) {
                pr_err("Transform message is broken\n");
                return -ECONNABORTED;
        }
index 261825d..a6dec5e 100644 (file)
@@ -1637,7 +1637,6 @@ struct smb2_posix_info {
 } __packed;
 
 /* functions */
-int init_smb2_0_server(struct ksmbd_conn *conn);
 void init_smb2_1_server(struct ksmbd_conn *conn);
 void init_smb3_0_server(struct ksmbd_conn *conn);
 void init_smb3_02_server(struct ksmbd_conn *conn);
index db8042a..707490a 100644 (file)
@@ -21,7 +21,6 @@ static const char basechars[43] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_-!@#$%";
 #define MAGIC_CHAR '~'
 #define PERIOD '.'
 #define mangle(V) ((char)(basechars[(V) % MANGLE_BASE]))
-#define KSMBD_MIN_SUPPORTED_HEADER_SIZE        (sizeof(struct smb2_hdr))
 
 struct smb_protocol {
        int             index;
@@ -89,7 +88,7 @@ unsigned int ksmbd_server_side_copy_max_total_size(void)
 
 inline int ksmbd_min_protocol(void)
 {
-       return SMB2_PROT;
+       return SMB21_PROT;
 }
 
 inline int ksmbd_max_protocol(void)
@@ -294,11 +293,6 @@ int ksmbd_init_smb_server(struct ksmbd_work *work)
        return 0;
 }
 
-bool ksmbd_pdu_size_has_room(unsigned int pdu)
-{
-       return (pdu >= KSMBD_MIN_SUPPORTED_HEADER_SIZE - 4);
-}
-
 int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
                                      struct ksmbd_file *dir,
                                      struct ksmbd_dir_info *d_info,
@@ -433,7 +427,7 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,
 
 static int __smb2_negotiate(struct ksmbd_conn *conn)
 {
-       return (conn->dialect >= SMB20_PROT_ID &&
+       return (conn->dialect >= SMB21_PROT_ID &&
                conn->dialect <= SMB311_PROT_ID);
 }
 
@@ -463,7 +457,7 @@ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
                }
        }
 
-       if (command == SMB2_NEGOTIATE_HE) {
+       if (command == SMB2_NEGOTIATE_HE && __smb2_negotiate(conn)) {
                ret = smb2_handle_negotiate(work);
                init_smb2_neg_rsp(work);
                return ret;
index 994abed..6e79e75 100644 (file)
@@ -48,6 +48,8 @@
 #define CIFS_DEFAULT_IOSIZE    (64 * 1024)
 #define MAX_CIFS_SMALL_BUFFER_SIZE 448 /* big enough for most */
 
+#define MAX_STREAM_PROT_LEN    0x00FFFFFF
+
 /* Responses when opening a file. */
 #define F_SUPERSEDED   0
 #define F_OPENED       1
@@ -493,8 +495,6 @@ int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count);
 
 int ksmbd_init_smb_server(struct ksmbd_work *work);
 
-bool ksmbd_pdu_size_has_room(unsigned int pdu);
-
 struct ksmbd_kstat;
 int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
                                      int info_level,
index cc7338f..7ce93aa 100644 (file)
@@ -957,7 +957,7 @@ static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
 
 #ifndef iounmap
 #define iounmap iounmap
-static inline void iounmap(void __iomem *addr)
+static inline void iounmap(volatile void __iomem *addr)
 {
 }
 #endif
index 24b40e5..018e776 100644 (file)
@@ -613,7 +613,7 @@ void kunit_remove_resource(struct kunit *test, struct kunit_resource *res);
  * and is automatically cleaned up after the test case concludes. See &struct
  * kunit_resource for more information.
  */
-void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t flags);
+void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp);
 
 /**
  * kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*.
@@ -657,9 +657,9 @@ static inline void *kunit_kzalloc(struct kunit *test, size_t size, gfp_t gfp)
  *
  * See kcalloc() and kunit_kmalloc_array() for more information.
  */
-static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp_t flags)
+static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp_t gfp)
 {
-       return kunit_kmalloc_array(test, n, size, flags | __GFP_ZERO);
+       return kunit_kmalloc_array(test, n, size, gfp | __GFP_ZERO);
 }
 
 void kunit_cleanup(struct kunit *test);
index 7d1cabe..63ccb52 100644 (file)
@@ -321,10 +321,20 @@ asmlinkage unsigned long __arm_smccc_sve_check(unsigned long x0);
  * from register 0 to 3 on return from the SMC instruction.  An optional
  * quirk structure provides vendor specific behavior.
  */
+#ifdef CONFIG_HAVE_ARM_SMCCC
 asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1,
                        unsigned long a2, unsigned long a3, unsigned long a4,
                        unsigned long a5, unsigned long a6, unsigned long a7,
                        struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
+#else
+static inline void __arm_smccc_smc(unsigned long a0, unsigned long a1,
+                       unsigned long a2, unsigned long a3, unsigned long a4,
+                       unsigned long a5, unsigned long a6, unsigned long a7,
+                       struct arm_smccc_res *res, struct arm_smccc_quirk *quirk)
+{
+       *res = (struct arm_smccc_res){};
+}
+#endif
 
 /**
  * __arm_smccc_hvc() - make HVC calls
index c0475d1..81cad9e 100644 (file)
@@ -61,7 +61,6 @@ enum qcom_scm_ice_cipher {
 #define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE)
 #define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC)
 
-#if IS_ENABLED(CONFIG_QCOM_SCM)
 extern bool qcom_scm_is_available(void);
 
 extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
@@ -115,74 +114,4 @@ extern int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
 extern int qcom_scm_lmh_profile_change(u32 profile_id);
 extern bool qcom_scm_lmh_dcvsh_available(void);
 
-#else
-
-#include <linux/errno.h>
-
-static inline bool qcom_scm_is_available(void) { return false; }
-
-static inline int qcom_scm_set_cold_boot_addr(void *entry,
-               const cpumask_t *cpus) { return -ENODEV; }
-static inline int qcom_scm_set_warm_boot_addr(void *entry,
-               const cpumask_t *cpus) { return -ENODEV; }
-static inline void qcom_scm_cpu_power_down(u32 flags) {}
-static inline u32 qcom_scm_set_remote_state(u32 state,u32 id)
-               { return -ENODEV; }
-
-static inline int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
-               size_t size) { return -ENODEV; }
-static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
-               phys_addr_t size) { return -ENODEV; }
-static inline int qcom_scm_pas_auth_and_reset(u32 peripheral)
-               { return -ENODEV; }
-static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; }
-static inline bool qcom_scm_pas_supported(u32 peripheral) { return false; }
-
-static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
-               { return -ENODEV; }
-static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
-               { return -ENODEV; }
-
-static inline bool qcom_scm_restore_sec_cfg_available(void) { return false; }
-static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
-               { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
-               { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
-               { return -ENODEV; }
-extern inline int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
-                                                u32 cp_nonpixel_start,
-                                                u32 cp_nonpixel_size)
-               { return -ENODEV; }
-static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
-               unsigned int *src, const struct qcom_scm_vmperm *newvm,
-               unsigned int dest_cnt) { return -ENODEV; }
-
-static inline bool qcom_scm_ocmem_lock_available(void) { return false; }
-static inline int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
-               u32 size, u32 mode) { return -ENODEV; }
-static inline int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id,
-               u32 offset, u32 size) { return -ENODEV; }
-
-static inline bool qcom_scm_ice_available(void) { return false; }
-static inline int qcom_scm_ice_invalidate_key(u32 index) { return -ENODEV; }
-static inline int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
-                                      enum qcom_scm_ice_cipher cipher,
-                                      u32 data_unit_size) { return -ENODEV; }
-
-static inline bool qcom_scm_hdcp_available(void) { return false; }
-static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
-               u32 *resp) { return -ENODEV; }
-
-static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
-               { return -ENODEV; }
-
-static inline int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
-                                    u64 limit_node, u32 node_id, u64 version)
-               { return -ENODEV; }
-
-static inline int qcom_scm_lmh_profile_change(u32 profile_id) { return -ENODEV; }
-
-static inline bool qcom_scm_lmh_dcvsh_available(void) { return -ENODEV; }
-#endif
 #endif
index 2ebef6b..74d3c1e 100644 (file)
@@ -399,9 +399,8 @@ extern struct workqueue_struct *system_freezable_power_efficient_wq;
  * RETURNS:
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
-struct workqueue_struct *alloc_workqueue(const char *fmt,
-                                        unsigned int flags,
-                                        int max_active, ...);
+__printf(1, 4) struct workqueue_struct *
+alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
 
 /**
  * alloc_ordered_workqueue - allocate an ordered workqueue
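Annotating alloc_workqueue() with __printf(1, 4) tells the compiler that argument 1 is a printf-style format whose variadic arguments begin at position 4, so mismatched format specifiers are caught at build time. A rough userspace equivalent using the underlying GCC/Clang attribute; the demo function takes one fixed argument fewer, so it uses positions 1 and 3.

#include <stdarg.h>
#include <stdio.h>

/* userspace approximation of the kernel's __printf(1, 3) */
__attribute__((format(printf, 1, 3)))
static void demo_alloc(const char *fmt, int flags, ...)
{
	va_list ap;

	(void)flags;
	va_start(ap, flags);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	demo_alloc("queue-%d\n", 0, 3);        /* ok */
	/* demo_alloc("queue-%s\n", 0, 3); */  /* would now trigger -Wformat */
	return 0;
}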
index 2eb6d7c..f37c7a5 100644 (file)
@@ -384,11 +384,11 @@ sctp_vtag_verify(const struct sctp_chunk *chunk,
         * Verification Tag value does not match the receiver's own
         * tag value, the receiver shall silently discard the packet...
         */
-        if (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)
-                return 1;
+       if (ntohl(chunk->sctp_hdr->vtag) != asoc->c.my_vtag)
+               return 0;
 
        chunk->transport->encap_port = SCTP_INPUT_CB(chunk->skb)->encap_port;
-       return 0;
+       return 1;
 }
 
 /* Check VTAG of the packet matches the sender's own tag and the T bit is
index 3166dc1..60c3845 100644 (file)
@@ -1576,6 +1576,7 @@ struct tcp_md5sig_key {
        u8                      keylen;
        u8                      family; /* AF_INET or AF_INET6 */
        u8                      prefixlen;
+       u8                      flags;
        union tcp_md5_addr      addr;
        int                     l3index; /* set if key added with L3 scope */
        u8                      key[TCP_MD5SIG_MAXKEYLEN];
@@ -1621,10 +1622,10 @@ struct tcp_md5sig_pool {
 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
                        const struct sock *sk, const struct sk_buff *skb);
 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
-                  int family, u8 prefixlen, int l3index,
+                  int family, u8 prefixlen, int l3index, u8 flags,
                   const u8 *newkey, u8 newkeylen, gfp_t gfp);
 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
-                  int family, u8 prefixlen, int l3index);
+                  int family, u8 prefixlen, int l3index, u8 flags);
 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
                                         const struct sock *addr_sk);
 
index 01570db..0e45963 100644 (file)
@@ -224,6 +224,7 @@ struct hda_codec {
 #endif
 
        /* misc flags */
+       unsigned int configured:1; /* codec was configured */
        unsigned int in_freeing:1; /* being released */
        unsigned int registered:1; /* codec was registered */
        unsigned int display_power_control:1; /* needs display power */
index db28e79..a3584a3 100644 (file)
@@ -52,12 +52,12 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
 #if defined(CONFIG_XEN_PV)
 int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
                  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
-                 unsigned int domid, bool no_translate, struct page **pages);
+                 unsigned int domid, bool no_translate);
 #else
 static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
                                xen_pfn_t *pfn, int nr, int *err_ptr,
                                pgprot_t prot,  unsigned int domid,
-                               bool no_translate, struct page **pages)
+                               bool no_translate)
 {
        BUG();
        return 0;
@@ -134,7 +134,7 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
         */
        BUG_ON(err_ptr == NULL);
        return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
-                            false, pages);
+                            false);
 }
 
 /*
@@ -146,7 +146,6 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
  * @err_ptr: Returns per-MFN error status.
  * @prot:    page protection mask
  * @domid:   Domain owning the pages
- * @pages:   Array of pages if this domain has an auto-translated physmap
  *
  * @mfn and @err_ptr may point to the same buffer, the MFNs will be
  * overwritten by the error codes after they are mapped.
@@ -157,14 +156,13 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
 static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
                                             unsigned long addr, xen_pfn_t *mfn,
                                             int nr, int *err_ptr,
-                                            pgprot_t prot, unsigned int domid,
-                                            struct page **pages)
+                                            pgprot_t prot, unsigned int domid)
 {
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -EOPNOTSUPP;
 
        return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
-                            true, pages);
+                            true);
 }
 
 /* xen_remap_domain_gfn_range() - map a range of foreign frames
@@ -188,8 +186,7 @@ static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -EOPNOTSUPP;
 
-       return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
-                            pages);
+       return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false);
 }
 
 int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
index df1ccf4..2a9695c 100644 (file)
@@ -311,17 +311,19 @@ static struct cpuset top_cpuset = {
                if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
 
 /*
- * There are two global locks guarding cpuset structures - cpuset_mutex and
+ * There are two global locks guarding cpuset structures - cpuset_rwsem and
  * callback_lock. We also require taking task_lock() when dereferencing a
  * task's cpuset pointer. See "The task_lock() exception", at the end of this
- * comment.
+ * comment.  The cpuset code uses only the cpuset_rwsem write lock.  Other
+ * kernel subsystems can use cpuset_read_lock()/cpuset_read_unlock() to
+ * prevent change to cpuset structures.
  *
  * A task must hold both locks to modify cpusets.  If a task holds
- * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
+ * cpuset_rwsem, it blocks others wanting that rwsem, ensuring that it
  * is the only task able to also acquire callback_lock and be able to
  * modify cpusets.  It can perform various checks on the cpuset structure
  * first, knowing nothing will change.  It can also allocate memory while
- * just holding cpuset_mutex.  While it is performing these checks, various
+ * just holding cpuset_rwsem.  While it is performing these checks, various
  * callback routines can briefly acquire callback_lock to query cpusets.
  * Once it is ready to make the changes, it takes callback_lock, blocking
  * everyone else.
@@ -393,7 +395,7 @@ static inline bool is_in_v2_mode(void)
  * One way or another, we guarantee to return some non-empty subset
  * of cpu_online_mask.
  *
- * Call with callback_lock or cpuset_mutex held.
+ * Call with callback_lock or cpuset_rwsem held.
  */
 static void guarantee_online_cpus(struct task_struct *tsk,
                                  struct cpumask *pmask)
@@ -435,7 +437,7 @@ out_unlock:
  * One way or another, we guarantee to return some non-empty subset
  * of node_states[N_MEMORY].
  *
- * Call with callback_lock or cpuset_mutex held.
+ * Call with callback_lock or cpuset_rwsem held.
  */
 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
 {
@@ -447,7 +449,7 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
 /*
  * update task's spread flag if cpuset's page/slab spread flag is set
  *
- * Call with callback_lock or cpuset_mutex held.
+ * Call with callback_lock or cpuset_rwsem held.
  */
 static void cpuset_update_task_spread_flag(struct cpuset *cs,
                                        struct task_struct *tsk)
@@ -468,7 +470,7 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
  *
  * One cpuset is a subset of another if all its allowed CPUs and
  * Memory Nodes are a subset of the other, and its exclusive flags
- * are only set if the other's are set.  Call holding cpuset_mutex.
+ * are only set if the other's are set.  Call holding cpuset_rwsem.
  */
 
 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
@@ -577,7 +579,7 @@ static inline void free_cpuset(struct cpuset *cs)
  * If we replaced the flag and mask values of the current cpuset
  * (cur) with those values in the trial cpuset (trial), would
  * our various subset and exclusive rules still be valid?  Presumes
- * cpuset_mutex held.
+ * cpuset_rwsem held.
  *
  * 'cur' is the address of an actual, in-use cpuset.  Operations
  * such as list traversal that depend on the actual address of the
@@ -700,7 +702,7 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
        rcu_read_unlock();
 }
 
-/* Must be called with cpuset_mutex held.  */
+/* Must be called with cpuset_rwsem held.  */
 static inline int nr_cpusets(void)
 {
        /* jump label reference count + the top-level cpuset */
@@ -726,7 +728,7 @@ static inline int nr_cpusets(void)
  * domains when operating in the severe memory shortage situations
  * that could cause allocation failures below.
  *
- * Must be called with cpuset_mutex held.
+ * Must be called with cpuset_rwsem held.
  *
  * The three key local variables below are:
  *    cp - cpuset pointer, used (together with pos_css) to perform a
@@ -1005,7 +1007,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
  * 'cpus' is removed, then call this routine to rebuild the
  * scheduler's dynamic sched domains.
  *
- * Call with cpuset_mutex held.  Takes cpus_read_lock().
+ * Call with cpuset_rwsem held.  Takes cpus_read_lock().
  */
 static void rebuild_sched_domains_locked(void)
 {
@@ -1078,7 +1080,7 @@ void rebuild_sched_domains(void)
  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
  *
  * Iterate through each task of @cs updating its cpus_allowed to the
- * effective cpuset's.  As this function is called with cpuset_mutex held,
+ * effective cpuset's.  As this function is called with cpuset_rwsem held,
  * cpuset membership stays stable.
  */
 static void update_tasks_cpumask(struct cpuset *cs)
@@ -1347,7 +1349,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
  *
  * On legacy hierarchy, effective_cpus will be the same with cpu_allowed.
  *
- * Called with cpuset_mutex held
+ * Called with cpuset_rwsem held
  */
 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
 {
@@ -1704,12 +1706,12 @@ static void *cpuset_being_rebound;
  * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
  *
  * Iterate through each task of @cs updating its mems_allowed to the
- * effective cpuset's.  As this function is called with cpuset_mutex held,
+ * effective cpuset's.  As this function is called with cpuset_rwsem held,
  * cpuset membership stays stable.
  */
 static void update_tasks_nodemask(struct cpuset *cs)
 {
-       static nodemask_t newmems;      /* protected by cpuset_mutex */
+       static nodemask_t newmems;      /* protected by cpuset_rwsem */
        struct css_task_iter it;
        struct task_struct *task;
 
@@ -1722,7 +1724,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
         * take while holding tasklist_lock.  Forks can happen - the
         * mpol_dup() cpuset_being_rebound check will catch such forks,
         * and rebind their vma mempolicies too.  Because we still hold
-        * the global cpuset_mutex, we know that no other rebind effort
+        * the global cpuset_rwsem, we know that no other rebind effort
         * will be contending for the global variable cpuset_being_rebound.
         * It's ok if we rebind the same mm twice; mpol_rebind_mm()
         * is idempotent.  Also migrate pages in each mm to new nodes.
@@ -1768,7 +1770,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
  *
  * On legacy hierarchy, effective_mems will be the same with mems_allowed.
  *
- * Called with cpuset_mutex held
+ * Called with cpuset_rwsem held
  */
 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
 {
@@ -1821,7 +1823,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
  * mempolicies and if the cpuset is marked 'memory_migrate',
  * migrate the tasks pages to the new memory.
  *
- * Call with cpuset_mutex held. May take callback_lock during call.
+ * Call with cpuset_rwsem held. May take callback_lock during call.
  * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
  * lock each such tasks mm->mmap_lock, scan its vma's and rebind
  * their mempolicies to the cpusets new mems_allowed.
@@ -1911,7 +1913,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
  * @cs: the cpuset in which each task's spread flags needs to be changed
  *
  * Iterate through each task of @cs updating its spread flags.  As this
- * function is called with cpuset_mutex held, cpuset membership stays
+ * function is called with cpuset_rwsem held, cpuset membership stays
  * stable.
  */
 static void update_tasks_flags(struct cpuset *cs)
@@ -1931,7 +1933,7 @@ static void update_tasks_flags(struct cpuset *cs)
  * cs:         the cpuset to update
  * turning_on:         whether the flag is being set or cleared
  *
- * Call with cpuset_mutex held.
+ * Call with cpuset_rwsem held.
  */
 
 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
@@ -1980,7 +1982,7 @@ out:
  * cs: the cpuset to update
  * new_prs: new partition root state
  *
- * Call with cpuset_mutex held.
+ * Call with cpuset_rwsem held.
  */
 static int update_prstate(struct cpuset *cs, int new_prs)
 {
@@ -2167,7 +2169,7 @@ static int fmeter_getrate(struct fmeter *fmp)
 
 static struct cpuset *cpuset_attach_old_cs;
 
-/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
+/* Called by cgroups to determine if a cpuset is usable; cpuset_rwsem held */
 static int cpuset_can_attach(struct cgroup_taskset *tset)
 {
        struct cgroup_subsys_state *css;
@@ -2219,7 +2221,7 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
 }
 
 /*
- * Protected by cpuset_mutex.  cpus_attach is used only by cpuset_attach()
+ * Protected by cpuset_rwsem.  cpus_attach is used only by cpuset_attach()
  * but we can't allocate it dynamically there.  Define it global and
  * allocate from cpuset_init().
  */
@@ -2227,7 +2229,7 @@ static cpumask_var_t cpus_attach;
 
 static void cpuset_attach(struct cgroup_taskset *tset)
 {
-       /* static buf protected by cpuset_mutex */
+       /* static buf protected by cpuset_rwsem */
        static nodemask_t cpuset_attach_nodemask_to;
        struct task_struct *task;
        struct task_struct *leader;
@@ -2417,7 +2419,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
         * operation like this one can lead to a deadlock through kernfs
         * active_ref protection.  Let's break the protection.  Losing the
         * protection is okay as we check whether @cs is online after
-        * grabbing cpuset_mutex anyway.  This only happens on the legacy
+        * grabbing cpuset_rwsem anyway.  This only happens on the legacy
         * hierarchies.
         */
        css_get(&cs->css);
@@ -3672,7 +3674,7 @@ void __cpuset_memory_pressure_bump(void)
  *  - Used for /proc/<pid>/cpuset.
  *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
  *    doesn't really matter if tsk->cpuset changes after we read it,
- *    and we take cpuset_mutex, keeping cpuset_attach() from changing it
+ *    and we take cpuset_rwsem, keeping cpuset_attach() from changing it
  *    anyway.
  */
 int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
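
The reworded comments above describe cpuset_rwsem as taken for writing only by the cpuset code itself, while other subsystems may take the read side via cpuset_read_lock()/cpuset_read_unlock() to keep cpusets from changing underneath them. A minimal sketch of such an outside reader (hypothetical code, not part of this patch):

#include <linux/cpuset.h>
#include <linux/sched.h>

static void demo_stable_cpuset_read(struct task_struct *p)
{
        cpuset_read_lock();
        /* While the read side is held no writer can take cpuset_rwsem,
         * so the cpuset @p is attached to (and its masks) stay stable. */
        pr_info("task %d: %u allowed CPUs\n", task_pid_nr(p),
                cpumask_weight(p->cpus_ptr));
        cpuset_read_unlock();
}
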
index 40ec9a0..5c26a76 100644 (file)
@@ -4489,8 +4489,10 @@ static void cfi_init(struct module *mod)
        /* Fix init/exit functions to point to the CFI jump table */
        if (init)
                mod->init = *init;
+#ifdef CONFIG_MODULE_UNLOAD
        if (exit)
                mod->exit = *exit;
+#endif
 
        cfi_module_add(mod, module_addr_min);
 #endif
index 33a6b4a..1b3eb1e 100644 (file)
@@ -4830,8 +4830,16 @@ void show_workqueue_state(void)
 
                for_each_pwq(pwq, wq) {
                        raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-                       if (pwq->nr_active || !list_empty(&pwq->inactive_works))
+                       if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+                               /*
+                                * Defer printing to avoid deadlocks in console
+                                * drivers that queue work while holding locks
+                                * also taken in their write paths.
+                                */
+                               printk_deferred_enter();
                                show_pwq(pwq);
+                               printk_deferred_exit();
+                       }
                        raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
                        /*
                         * We could be printing a lot from atomic context, e.g.
@@ -4849,7 +4857,12 @@ void show_workqueue_state(void)
                raw_spin_lock_irqsave(&pool->lock, flags);
                if (pool->nr_workers == pool->nr_idle)
                        goto next_pool;
-
+               /*
+                * Defer printing to avoid deadlocks in console drivers that
+                * queue work while holding locks also taken in their write
+                * paths.
+                */
+               printk_deferred_enter();
                pr_info("pool %d:", pool->id);
                pr_cont_pool_info(pool);
                pr_cont(" hung=%us workers=%d",
@@ -4864,6 +4877,7 @@ void show_workqueue_state(void)
                        first = false;
                }
                pr_cont("\n");
+               printk_deferred_exit();
        next_pool:
                raw_spin_unlock_irqrestore(&pool->lock, flags);
                /*
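
The pattern added above generalizes: whenever a burst of printk() output is produced under a lock that a console driver might also need (for instance because it queues work from its write path), the console output is deferred for the duration. A condensed sketch of the same pattern (hypothetical code, not taken from this patch):

#include <linux/printk.h>
#include <linux/spinlock.h>

static void demo_dump_under_lock(raw_spinlock_t *lock, int id)
{
        unsigned long flags;

        raw_spin_lock_irqsave(lock, flags);
        printk_deferred_enter();        /* console flushing happens later */
        pr_info("object %d:", id);
        pr_cont(" state dumped while the lock is held\n");
        printk_deferred_exit();
        raw_spin_unlock_irqrestore(lock, flags);
}
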
index 5efd1b4..a841be5 100644 (file)
@@ -351,7 +351,7 @@ obj-$(CONFIG_OBJAGG) += objagg.o
 obj-$(CONFIG_PLDMFW) += pldmfw/
 
 # KUnit tests
-CFLAGS_bitfield_kunit.o := $(call cc-option,-Wframe-larger-than=10240)
+CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
 obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
 obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
 obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
index cdbe54b..e14a18a 100644 (file)
@@ -116,8 +116,8 @@ static void kfree_at_end(struct kunit *test, const void *to_free)
        /* kfree() handles NULL already, but avoid allocating a no-op cleanup. */
        if (IS_ERR_OR_NULL(to_free))
                return;
-       kunit_alloc_and_get_resource(test, NULL, kfree_res_free, GFP_KERNEL,
-                                    (void *)to_free);
+       kunit_alloc_resource(test, NULL, kfree_res_free, GFP_KERNEL,
+                            (void *)to_free);
 }
 
 static struct kunit_suite *alloc_fake_suite(struct kunit *test,
index 2e62e0d..5b8ce65 100644 (file)
@@ -1037,6 +1037,20 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
 DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
 EXPORT_SYMBOL(tcp_md5_needed);
 
+static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
+{
+       if (!old)
+               return true;
+
+       /* l3index always overrides non-l3index */
+       if (old->l3index && new->l3index == 0)
+               return false;
+       if (old->l3index == 0 && new->l3index)
+               return true;
+
+       return old->prefixlen < new->prefixlen;
+}
+
 /* Find the Key structure for an address.  */
 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
                                           const union tcp_md5_addr *addr,
@@ -1059,7 +1073,7 @@ struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
                                 lockdep_sock_is_held(sk)) {
                if (key->family != family)
                        continue;
-               if (key->l3index && key->l3index != l3index)
+               if (key->flags & TCP_MD5SIG_FLAG_IFINDEX && key->l3index != l3index)
                        continue;
                if (family == AF_INET) {
                        mask = inet_make_mask(key->prefixlen);
@@ -1074,8 +1088,7 @@ struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
                        match = false;
                }
 
-               if (match && (!best_match ||
-                             key->prefixlen > best_match->prefixlen))
+               if (match && better_md5_match(best_match, key))
                        best_match = key;
        }
        return best_match;
@@ -1085,7 +1098,7 @@ EXPORT_SYMBOL(__tcp_md5_do_lookup);
 static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
                                                      const union tcp_md5_addr *addr,
                                                      int family, u8 prefixlen,
-                                                     int l3index)
+                                                     int l3index, u8 flags)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_key *key;
@@ -1105,7 +1118,9 @@ static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
                                 lockdep_sock_is_held(sk)) {
                if (key->family != family)
                        continue;
-               if (key->l3index && key->l3index != l3index)
+               if ((key->flags & TCP_MD5SIG_FLAG_IFINDEX) != (flags & TCP_MD5SIG_FLAG_IFINDEX))
+                       continue;
+               if (key->l3index != l3index)
                        continue;
                if (!memcmp(&key->addr, addr, size) &&
                    key->prefixlen == prefixlen)
@@ -1129,7 +1144,7 @@ EXPORT_SYMBOL(tcp_v4_md5_lookup);
 
 /* This can be called on a newly created socket, from other files */
 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
-                  int family, u8 prefixlen, int l3index,
+                  int family, u8 prefixlen, int l3index, u8 flags,
                   const u8 *newkey, u8 newkeylen, gfp_t gfp)
 {
        /* Add Key to the list */
@@ -1137,7 +1152,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_info *md5sig;
 
-       key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
+       key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
        if (key) {
                /* Pre-existing entry - just update that one.
                 * Note that the key might be used concurrently.
@@ -1182,6 +1197,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
        key->family = family;
        key->prefixlen = prefixlen;
        key->l3index = l3index;
+       key->flags = flags;
        memcpy(&key->addr, addr,
               (family == AF_INET6) ? sizeof(struct in6_addr) :
                                      sizeof(struct in_addr));
@@ -1191,11 +1207,11 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
 EXPORT_SYMBOL(tcp_md5_do_add);
 
 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
-                  u8 prefixlen, int l3index)
+                  u8 prefixlen, int l3index, u8 flags)
 {
        struct tcp_md5sig_key *key;
 
-       key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
+       key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
        if (!key)
                return -ENOENT;
        hlist_del_rcu(&key->node);
@@ -1229,6 +1245,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
        const union tcp_md5_addr *addr;
        u8 prefixlen = 32;
        int l3index = 0;
+       u8 flags;
 
        if (optlen < sizeof(cmd))
                return -EINVAL;
@@ -1239,6 +1256,8 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
        if (sin->sin_family != AF_INET)
                return -EINVAL;
 
+       flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
+
        if (optname == TCP_MD5SIG_EXT &&
            cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
                prefixlen = cmd.tcpm_prefixlen;
@@ -1246,7 +1265,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
                        return -EINVAL;
        }
 
-       if (optname == TCP_MD5SIG_EXT &&
+       if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
            cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
                struct net_device *dev;
 
@@ -1267,12 +1286,12 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
        addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;
 
        if (!cmd.tcpm_keylen)
-               return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index);
+               return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index, flags);
 
        if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
                return -EINVAL;
 
-       return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index,
+       return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index, flags,
                              cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 }
 
@@ -1596,7 +1615,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                 * memory, then we end up not copying the key
                 * across. Shucks.
                 */
-               tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index,
+               tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index, key->flags,
                               key->key, key->keylen, GFP_ATOMIC);
                sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
        }
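
For context, the flag plumbed through above originates in userspace via setsockopt(TCP_MD5SIG_EXT). A rough userspace sketch that installs a device-bound key (hypothetical program, assuming the uapi definitions from <linux/tcp.h>):

#include <string.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/tcp.h>  /* struct tcp_md5sig, TCP_MD5SIG_FLAG_IFINDEX */

static int add_dev_bound_md5_key(int fd, const char *peer, const char *dev,
                                 const char *pw)
{
        struct tcp_md5sig cfg;
        struct sockaddr_in *sin = (struct sockaddr_in *)&cfg.tcpm_addr;

        memset(&cfg, 0, sizeof(cfg));
        sin->sin_family = AF_INET;
        inet_pton(AF_INET, peer, &sin->sin_addr);

        /* Bind the key to one device; keys added without this flag now
         * match on any device, as better_md5_match() above implements. */
        cfg.tcpm_flags = TCP_MD5SIG_FLAG_IFINDEX;
        cfg.tcpm_ifindex = if_nametoindex(dev);
        cfg.tcpm_keylen = strlen(pw);
        memcpy(cfg.tcpm_key, pw, cfg.tcpm_keylen);

        return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG_EXT, &cfg, sizeof(cfg));
}
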
index 12f985f..2f044a4 100644 (file)
@@ -464,13 +464,14 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 
 int ip6_forward(struct sk_buff *skb)
 {
-       struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
        struct dst_entry *dst = skb_dst(skb);
        struct ipv6hdr *hdr = ipv6_hdr(skb);
        struct inet6_skb_parm *opt = IP6CB(skb);
        struct net *net = dev_net(dst->dev);
+       struct inet6_dev *idev;
        u32 mtu;
 
+       idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
        if (net->ipv6.devconf_all->forwarding == 0)
                goto error;
 
index 0ce52d4..b03dd02 100644 (file)
@@ -599,6 +599,7 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
        int l3index = 0;
        u8 prefixlen;
+       u8 flags;
 
        if (optlen < sizeof(cmd))
                return -EINVAL;
@@ -609,6 +610,8 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
        if (sin6->sin6_family != AF_INET6)
                return -EINVAL;
 
+       flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
+
        if (optname == TCP_MD5SIG_EXT &&
            cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
                prefixlen = cmd.tcpm_prefixlen;
@@ -619,7 +622,7 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
                prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
        }
 
-       if (optname == TCP_MD5SIG_EXT &&
+       if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
            cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
                struct net_device *dev;
 
@@ -640,9 +643,9 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
                if (ipv6_addr_v4mapped(&sin6->sin6_addr))
                        return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
                                              AF_INET, prefixlen,
-                                             l3index);
+                                             l3index, flags);
                return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
-                                     AF_INET6, prefixlen, l3index);
+                                     AF_INET6, prefixlen, l3index, flags);
        }
 
        if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
@@ -650,12 +653,12 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
 
        if (ipv6_addr_v4mapped(&sin6->sin6_addr))
                return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
-                                     AF_INET, prefixlen, l3index,
+                                     AF_INET, prefixlen, l3index, flags,
                                      cmd.tcpm_key, cmd.tcpm_keylen,
                                      GFP_KERNEL);
 
        return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
-                             AF_INET6, prefixlen, l3index,
+                             AF_INET6, prefixlen, l3index, flags,
                              cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 }
 
@@ -1404,7 +1407,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
                 * across. Shucks.
                 */
                tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
-                              AF_INET6, 128, l3index, key->key, key->keylen,
+                              AF_INET6, 128, l3index, key->flags, key->key, key->keylen,
                               sk_gfp_mask(sk, GFP_ATOMIC));
        }
 #endif
index 952e468..4aad284 100644 (file)
@@ -19,6 +19,10 @@ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF)              \
                += -fplugin-arg-structleak_plugin-byref
 gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL)    \
                += -fplugin-arg-structleak_plugin-byref-all
+ifdef CONFIG_GCC_PLUGIN_STRUCTLEAK
+    DISABLE_STRUCTLEAK_PLUGIN += -fplugin-arg-structleak_plugin-disable
+endif
+export DISABLE_STRUCTLEAK_PLUGIN
 gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK)              \
                += -DSTRUCTLEAK_PLUGIN
 
index fd9777f..9dbab13 100755 (executable)
@@ -82,10 +82,8 @@ cat << EOF
 #define __IGNORE_truncate64
 #define __IGNORE_stat64
 #define __IGNORE_lstat64
-#define __IGNORE_fstat64
 #define __IGNORE_fcntl64
 #define __IGNORE_fadvise64_64
-#define __IGNORE_fstatat64
 #define __IGNORE_fstatfs64
 #define __IGNORE_statfs64
 #define __IGNORE_llseek
@@ -253,6 +251,10 @@ cat << EOF
 #define __IGNORE_getpmsg
 #define __IGNORE_putpmsg
 #define __IGNORE_vserver
+
+/* 64-bit ports never needed these, and new 32-bit ports can use statx */
+#define __IGNORE_fstat64
+#define __IGNORE_fstatat64
 EOF
 }
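
The comment added above points new 32-bit ports at statx() instead of the fstat64/fstatat64 pair. A minimal userspace illustration (hypothetical example, not part of the patch):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        struct statx stx;

        /* statx() reports what fstatat64() used to, without needing a
         * separate 64-bit stat family of syscalls on new ports. */
        if (statx(AT_FDCWD, "/etc/hostname", 0, STATX_BASIC_STATS, &stx) != 0) {
                perror("statx");
                return 1;
        }
        printf("size=%llu bytes, mode=%o\n",
               (unsigned long long)stx.stx_size, stx.stx_mode);
        return 0;
}
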
 
index a59de24..dfe5a64 100644 (file)
@@ -468,6 +468,76 @@ static int snd_pcm_ioctl_sync_ptr_x32(struct snd_pcm_substream *substream,
 }
 #endif /* CONFIG_X86_X32 */
 
+#ifdef __BIG_ENDIAN
+typedef char __pad_before_u32[4];
+typedef char __pad_after_u32[0];
+#else
+typedef char __pad_before_u32[0];
+typedef char __pad_after_u32[4];
+#endif
+
+/* PCM 2.0.15 API definition had a bug in mmap control; it puts the avail_min
+ * at the wrong offset due to a typo in padding type.
+ * The bug hits only 32bit.
+ * A workaround for incorrect read/write is needed only in 32bit compat mode.
+ */
+struct __snd_pcm_mmap_control64_buggy {
+       __pad_before_u32 __pad1;
+       __u32 appl_ptr;
+       __pad_before_u32 __pad2;        /* SiC! here is the bug */
+       __pad_before_u32 __pad3;
+       __u32 avail_min;
+       __pad_after_uframe __pad4;
+};
+
+static int snd_pcm_ioctl_sync_ptr_buggy(struct snd_pcm_substream *substream,
+                                       struct snd_pcm_sync_ptr __user *_sync_ptr)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct snd_pcm_sync_ptr sync_ptr;
+       struct __snd_pcm_mmap_control64_buggy *sync_cp;
+       volatile struct snd_pcm_mmap_status *status;
+       volatile struct snd_pcm_mmap_control *control;
+       int err;
+
+       memset(&sync_ptr, 0, sizeof(sync_ptr));
+       sync_cp = (struct __snd_pcm_mmap_control64_buggy *)&sync_ptr.c.control;
+       if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
+               return -EFAULT;
+       if (copy_from_user(sync_cp, &(_sync_ptr->c.control), sizeof(*sync_cp)))
+               return -EFAULT;
+       status = runtime->status;
+       control = runtime->control;
+       if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
+               err = snd_pcm_hwsync(substream);
+               if (err < 0)
+                       return err;
+       }
+       snd_pcm_stream_lock_irq(substream);
+       if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) {
+               err = pcm_lib_apply_appl_ptr(substream, sync_cp->appl_ptr);
+               if (err < 0) {
+                       snd_pcm_stream_unlock_irq(substream);
+                       return err;
+               }
+       } else {
+               sync_cp->appl_ptr = control->appl_ptr;
+       }
+       if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
+               control->avail_min = sync_cp->avail_min;
+       else
+               sync_cp->avail_min = control->avail_min;
+       sync_ptr.s.status.state = status->state;
+       sync_ptr.s.status.hw_ptr = status->hw_ptr;
+       sync_ptr.s.status.tstamp = status->tstamp;
+       sync_ptr.s.status.suspended_state = status->suspended_state;
+       sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
+       snd_pcm_stream_unlock_irq(substream);
+       if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
+               return -EFAULT;
+       return 0;
+}
+
 /*
  */
 enum {
@@ -537,7 +607,7 @@ static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned l
                if (in_x32_syscall())
                        return snd_pcm_ioctl_sync_ptr_x32(substream, argp);
 #endif /* CONFIG_X86_X32 */
-               return snd_pcm_common_ioctl(file, substream, cmd, argp);
+               return snd_pcm_ioctl_sync_ptr_buggy(substream, argp);
        case SNDRV_PCM_IOCTL_HW_REFINE32:
                return snd_pcm_ioctl_hw_params_compat(substream, 1, argp);
        case SNDRV_PCM_IOCTL_HW_PARAMS32:
index 382275c..7f3fd8e 100644 (file)
@@ -156,6 +156,8 @@ static int snd_seq_device_dev_free(struct snd_device *device)
        struct snd_seq_device *dev = device->device_data;
 
        cancel_autoload_drivers();
+       if (dev->private_free)
+               dev->private_free(dev);
        put_device(&dev->dev);
        return 0;
 }
@@ -183,11 +185,7 @@ static int snd_seq_device_dev_disconnect(struct snd_device *device)
 
 static void snd_seq_dev_release(struct device *dev)
 {
-       struct snd_seq_device *sdev = to_seq_dev(dev);
-
-       if (sdev->private_free)
-               sdev->private_free(sdev);
-       kfree(sdev);
+       kfree(to_seq_dev(dev));
 }
 
 /*
index 062da7a..f7bd6e2 100644 (file)
@@ -421,8 +421,9 @@ int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
        if (!full_reset)
                goto skip_reset;
 
-       /* clear STATESTS */
-       snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK);
+       /* clear STATESTS if not in reset */
+       if (snd_hdac_chip_readb(bus, GCTL) & AZX_GCTL_RESET)
+               snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK);
 
        /* reset controller */
        snd_hdac_bus_enter_link_reset(bus);
index 2523b23..1c8bffc 100644 (file)
@@ -298,29 +298,31 @@ int snd_hda_codec_configure(struct hda_codec *codec)
 {
        int err;
 
+       if (codec->configured)
+               return 0;
+
        if (is_generic_config(codec))
                codec->probe_id = HDA_CODEC_ID_GENERIC;
        else
                codec->probe_id = 0;
 
-       err = snd_hdac_device_register(&codec->core);
-       if (err < 0)
-               return err;
+       if (!device_is_registered(&codec->core.dev)) {
+               err = snd_hdac_device_register(&codec->core);
+               if (err < 0)
+                       return err;
+       }
 
        if (!codec->preset)
                codec_bind_module(codec);
        if (!codec->preset) {
                err = codec_bind_generic(codec);
                if (err < 0) {
-                       codec_err(codec, "Unable to bind the codec\n");
-                       goto error;
+                       codec_dbg(codec, "Unable to bind the codec\n");
+                       return err;
                }
        }
 
+       codec->configured = 1;
        return 0;
-
- error:
-       snd_hdac_device_unregister(&codec->core);
-       return err;
 }
 EXPORT_SYMBOL_GPL(snd_hda_codec_configure);
index a9ebefd..0c4a337 100644 (file)
@@ -791,6 +791,7 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec)
        snd_array_free(&codec->nids);
        remove_conn_list(codec);
        snd_hdac_regmap_exit(&codec->core);
+       codec->configured = 0;
 }
 EXPORT_SYMBOL_GPL(snd_hda_codec_cleanup_for_unbind);
 
index 7cd4528..930ae40 100644 (file)
@@ -25,6 +25,7 @@
 #include <sound/core.h>
 #include <sound/initval.h>
 #include "hda_controller.h"
+#include "hda_local.h"
 
 #define CREATE_TRACE_POINTS
 #include "hda_controller_trace.h"
@@ -1248,17 +1249,24 @@ EXPORT_SYMBOL_GPL(azx_probe_codecs);
 int azx_codec_configure(struct azx *chip)
 {
        struct hda_codec *codec, *next;
+       int success = 0;
 
-       /* use _safe version here since snd_hda_codec_configure() deregisters
-        * the device upon error and deletes itself from the bus list.
-        */
-       list_for_each_codec_safe(codec, next, &chip->bus) {
-               snd_hda_codec_configure(codec);
+       list_for_each_codec(codec, &chip->bus) {
+               if (!snd_hda_codec_configure(codec))
+                       success++;
        }
 
-       if (!azx_bus(chip)->num_codecs)
-               return -ENODEV;
-       return 0;
+       if (success) {
+               /* unregister failed codecs if any codec has been probed */
+               list_for_each_codec_safe(codec, next, &chip->bus) {
+                       if (!codec->configured) {
+                               codec_err(codec, "Unable to configure, disabling\n");
+                               snd_hdac_device_unregister(&codec->core);
+                       }
+               }
+       }
+
+       return success ? 0 : -ENODEV;
 }
 EXPORT_SYMBOL_GPL(azx_codec_configure);
 
index 3062f87..f5bf295 100644 (file)
@@ -41,7 +41,7 @@
 /* 24 unused */
 #define AZX_DCAPS_COUNT_LPIB_DELAY  (1 << 25)  /* Take LPIB as delay */
 #define AZX_DCAPS_PM_RUNTIME   (1 << 26)       /* runtime PM support */
-/* 27 unused */
+#define AZX_DCAPS_RETRY_PROBE  (1 << 27)       /* retry probe if no codec is configured */
 #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28)  /* CORBRP clears itself after reset */
 #define AZX_DCAPS_NO_MSI64      (1 << 29)      /* Stick to 32-bit MSIs */
 #define AZX_DCAPS_SEPARATE_STREAM_TAG  (1 << 30) /* capture and playback use separate stream tag */
index 4777743..4d22e7a 100644 (file)
@@ -307,7 +307,8 @@ enum {
 /* quirks for AMD SB */
 #define AZX_DCAPS_PRESET_AMD_SB \
        (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_AMD_WORKAROUND |\
-        AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME)
+        AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME |\
+        AZX_DCAPS_RETRY_PROBE)
 
 /* quirks for Nvidia */
 #define AZX_DCAPS_PRESET_NVIDIA \
@@ -1723,7 +1724,7 @@ static void azx_check_snoop_available(struct azx *chip)
 
 static void azx_probe_work(struct work_struct *work)
 {
-       struct hda_intel *hda = container_of(work, struct hda_intel, probe_work);
+       struct hda_intel *hda = container_of(work, struct hda_intel, probe_work.work);
        azx_probe_continue(&hda->chip);
 }
 
@@ -1828,7 +1829,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
        }
 
        /* continue probing in work context as may trigger request module */
-       INIT_WORK(&hda->probe_work, azx_probe_work);
+       INIT_DELAYED_WORK(&hda->probe_work, azx_probe_work);
 
        *rchip = chip;
 
@@ -2142,7 +2143,7 @@ static int azx_probe(struct pci_dev *pci,
 #endif
 
        if (schedule_probe)
-               schedule_work(&hda->probe_work);
+               schedule_delayed_work(&hda->probe_work, 0);
 
        dev++;
        if (chip->disabled)
@@ -2228,6 +2229,11 @@ static int azx_probe_continue(struct azx *chip)
        int dev = chip->dev_index;
        int err;
 
+       if (chip->disabled || hda->init_failed)
+               return -EIO;
+       if (hda->probe_retry)
+               goto probe_retry;
+
        to_hda_bus(bus)->bus_probing = 1;
        hda->probe_continued = 1;
 
@@ -2289,10 +2295,20 @@ static int azx_probe_continue(struct azx *chip)
 #endif
        }
 #endif
+
+ probe_retry:
        if (bus->codec_mask && !(probe_only[dev] & 1)) {
                err = azx_codec_configure(chip);
-               if (err < 0)
+               if (err) {
+                       if ((chip->driver_caps & AZX_DCAPS_RETRY_PROBE) &&
+                           ++hda->probe_retry < 60) {
+                               schedule_delayed_work(&hda->probe_work,
+                                                     msecs_to_jiffies(1000));
+                               return 0; /* keep things up */
+                       }
+                       dev_err(chip->card->dev, "Cannot probe codecs, giving up\n");
                        goto out_free;
+               }
        }
 
        err = snd_card_register(chip->card);
@@ -2322,6 +2338,7 @@ out_free:
                display_power(chip, false);
        complete_all(&hda->probe_wait);
        to_hda_bus(bus)->bus_probing = 0;
+       hda->probe_retry = 0;
        return 0;
 }
 
@@ -2347,7 +2364,7 @@ static void azx_remove(struct pci_dev *pci)
                 * device during cancel_work_sync() call.
                 */
                device_unlock(&pci->dev);
-               cancel_work_sync(&hda->probe_work);
+               cancel_delayed_work_sync(&hda->probe_work);
                device_lock(&pci->dev);
 
                snd_card_free(card);
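
Taken together, the hunks above turn the probe work into a delayed work item so that a failed codec configuration can be retried for up to a minute. A condensed sketch of the mechanism (hypothetical driver code, not the actual hda_intel logic; demo_configure_codecs() is a stand-in for azx_codec_configure()):

struct demo_chip {
        struct delayed_work probe_work;
        int probe_retry;
};

/* hypothetical stand-in: returns 0 once all codecs are configured */
static int demo_configure_codecs(struct demo_chip *chip);

static void demo_probe_work(struct work_struct *work)
{
        struct demo_chip *chip =
                container_of(work, struct demo_chip, probe_work.work);

        if (demo_configure_codecs(chip) < 0 && ++chip->probe_retry < 60) {
                /* codecs not ready yet: retry in a second instead of
                 * failing the PCI probe outright */
                schedule_delayed_work(&chip->probe_work, msecs_to_jiffies(1000));
                return;
        }
        chip->probe_retry = 0;
}

/* probe:  INIT_DELAYED_WORK(&chip->probe_work, demo_probe_work);
 *         schedule_delayed_work(&chip->probe_work, 0);
 * remove: cancel_delayed_work_sync(&chip->probe_work);
 */
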
index 3fb119f..0f39418 100644 (file)
@@ -14,7 +14,7 @@ struct hda_intel {
 
        /* sync probing */
        struct completion probe_wait;
-       struct work_struct probe_work;
+       struct delayed_work probe_work;
 
        /* card list (for power_save trigger) */
        struct list_head list;
@@ -30,6 +30,8 @@ struct hda_intel {
        unsigned int freed:1; /* resources already released */
 
        bool need_i915_power:1; /* the hda controller needs i915 power */
+
+       int probe_retry;        /* probe retry counter */
 };
 
 #endif
index 4407f7d..22d27b1 100644 (file)
@@ -526,6 +526,8 @@ static void alc_shutup_pins(struct hda_codec *codec)
        struct alc_spec *spec = codec->spec;
 
        switch (codec->core.vendor_id) {
+       case 0x10ec0236:
+       case 0x10ec0256:
        case 0x10ec0283:
        case 0x10ec0286:
        case 0x10ec0288:
@@ -2537,7 +2539,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
-       SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170SM", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x7715, "Clevo X170KM-G", ALC1220_FIXUP_CLEVO_PB51ED),
        SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1558, 0x9506, "Clevo P955HQ", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1558, 0x950a, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
@@ -3528,7 +3531,8 @@ static void alc256_shutup(struct hda_codec *codec)
        /* If disable 3k pulldown control for alc257, the Mic detection will not work correctly
         * when booting with headset plugged. So skip setting it for the codec alc257
         */
-       if (codec->core.vendor_id != 0x10ec0257)
+       if (spec->codec_variant != ALC269_TYPE_ALC257 &&
+           spec->codec_variant != ALC269_TYPE_ALC256)
                alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
 
        if (!spec->no_shutup_pins)
@@ -6449,6 +6453,24 @@ static void alc287_fixup_legion_15imhg05_speakers(struct hda_codec *codec,
 /* for alc285_fixup_ideapad_s740_coef() */
 #include "ideapad_s740_helper.c"
 
+static void alc256_fixup_tongfang_reset_persistent_settings(struct hda_codec *codec,
+                                                           const struct hda_fixup *fix,
+                                                           int action)
+{
+       /*
+        * A certain other OS sets these coeffs to different values. On at least one TongFang
+        * barebone these settings might survive even a cold reboot. So to restore a clean slate the
+        * values are explicitly reset to default here. Without this, the external microphone is
+        * always in a plugged-in state, while the internal microphone is always in an unplugged
+        * state, breaking the ability to use the internal microphone.
+        */
+       alc_write_coef_idx(codec, 0x24, 0x0000);
+       alc_write_coef_idx(codec, 0x26, 0x0000);
+       alc_write_coef_idx(codec, 0x29, 0x3000);
+       alc_write_coef_idx(codec, 0x37, 0xfe05);
+       alc_write_coef_idx(codec, 0x45, 0x5089);
+}
+
 enum {
        ALC269_FIXUP_GPIO2,
        ALC269_FIXUP_SONY_VAIO,
@@ -6663,7 +6685,8 @@ enum {
        ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS,
        ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
        ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
-       ALC287_FIXUP_13S_GEN2_SPEAKERS
+       ALC287_FIXUP_13S_GEN2_SPEAKERS,
+       ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -8344,7 +8367,7 @@ static const struct hda_fixup alc269_fixups[] = {
                .v.verbs = (const struct hda_verb[]) {
                        { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
                        { 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
-                       { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
                        { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
                        { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
@@ -8361,6 +8384,10 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MODE,
        },
+       [ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc256_fixup_tongfang_reset_persistent_settings,
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8452,6 +8479,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0a61, "Dell XPS 15 9510", ALC289_FIXUP_DUAL_SPK),
+       SND_PCI_QUIRK(0x1028, 0x0a62, "Dell Precision 5560", ALC289_FIXUP_DUAL_SPK),
+       SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -8789,6 +8819,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
        SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
        SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+       SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS),
        SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
        SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
@@ -10166,6 +10197,9 @@ enum {
        ALC671_FIXUP_HP_HEADSET_MIC2,
        ALC662_FIXUP_ACER_X2660G_HEADSET_MODE,
        ALC662_FIXUP_ACER_NITRO_HEADSET_MODE,
+       ALC668_FIXUP_ASUS_NO_HEADSET_MIC,
+       ALC668_FIXUP_HEADSET_MIC,
+       ALC668_FIXUP_MIC_DET_COEF,
 };
 
 static const struct hda_fixup alc662_fixups[] = {
@@ -10549,6 +10583,29 @@ static const struct hda_fixup alc662_fixups[] = {
                .chained = true,
                .chain_id = ALC662_FIXUP_USI_FUNC
        },
+       [ALC668_FIXUP_ASUS_NO_HEADSET_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1b, 0x04a1112c },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC668_FIXUP_HEADSET_MIC
+       },
+       [ALC668_FIXUP_HEADSET_MIC] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269_fixup_headset_mic,
+               .chained = true,
+               .chain_id = ALC668_FIXUP_MIC_DET_COEF
+       },
+       [ALC668_FIXUP_MIC_DET_COEF] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x15 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x0d60 },
+                       {}
+               },
+       },
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -10584,6 +10641,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
        SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
        SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
+       SND_PCI_QUIRK(0x1043, 0x185d, "ASUS G551JW", ALC668_FIXUP_ASUS_NO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x1963, "ASUS X71SL", ALC662_FIXUP_ASUS_MODE8),
        SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
        SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
index 3d5848d..53ebabf 100644 (file)
@@ -2450,6 +2450,8 @@ static int scarlett2_update_monitor_other(struct usb_mixer_interface *mixer)
                err = scarlett2_usb_get_config(mixer,
                                               SCARLETT2_CONFIG_TALKBACK_MAP,
                                               1, &bitmap);
+               if (err < 0)
+                       return err;
                for (i = 0; i < num_mixes; i++, bitmap >>= 1)
                        private->talkback_map[i] = bitmap & 1;
        }
index e03043f..de18fff 100644 (file)
 { USB_DEVICE_VENDOR_SPEC(0x041e, 0x3f19) },
 
 /*
+ * Creative Technology, Ltd Live! Cam Sync HD [VF0770]
+ * The device advertises 8 formats, but only a rate of 48kHz is honored by the
+ * hardware and 24 bits give chopped audio, so only report the one working
+ * combination.
+ */
+{
+       USB_DEVICE(0x041e, 0x4095),
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = &(const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_STANDARD_MIXER,
+                       },
+                       {
+                               .ifnum = 3,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S16_LE,
+                                       .channels = 2,
+                                       .fmt_bits = 16,
+                                       .iface = 3,
+                                       .altsetting = 4,
+                                       .altset_idx = 4,
+                                       .endpoint = 0x82,
+                                       .ep_attr = 0x05,
+                                       .rates = SNDRV_PCM_RATE_48000,
+                                       .rate_min = 48000,
+                                       .rate_max = 48000,
+                                       .nr_rates = 1,
+                                       .rate_table = (unsigned int[]) { 48000 },
+                               },
+                       },
+                       {
+                               .ifnum = -1
+                       },
+               },
+       },
+},
+
+/*
  * HP Wireless Audio
  * When not ignored, causes instability issues for some users, forcing them to
  * skip the entire module.
index 6ee6d24..889c855 100644 (file)
@@ -1900,6 +1900,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
                   QUIRK_FLAG_CTL_MSG_DELAY | QUIRK_FLAG_IFACE_DELAY),
        VENDOR_FLG(0x07fd, /* MOTU */
                   QUIRK_FLAG_VALIDATE_RATES),
+       VENDOR_FLG(0x1235, /* Focusrite Novation */
+                  QUIRK_FLAG_VALIDATE_RATES),
        VENDOR_FLG(0x152a, /* Thesycon devices */
                   QUIRK_FLAG_DSD_RAW),
        VENDOR_FLG(0x1de7, /* Phoenix Audio */
index bc82105..0893436 100644 (file)
@@ -684,7 +684,7 @@ static int elf_add_alternative(struct elf *elf,
        sec = find_section_by_name(elf, ".altinstructions");
        if (!sec) {
                sec = elf_create_section(elf, ".altinstructions",
-                                        SHF_ALLOC, size, 0);
+                                        SHF_ALLOC, 0, 0);
 
                if (!sec) {
                        WARN_ELF("elf_create_section");
index e5947fb..06b5c16 100644 (file)
@@ -292,7 +292,7 @@ static int decode_instructions(struct objtool_file *file)
                    !strcmp(sec->name, ".entry.text"))
                        sec->noinstr = true;
 
-               for (offset = 0; offset < sec->len; offset += insn->len) {
+               for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
                        insn = malloc(sizeof(*insn));
                        if (!insn) {
                                WARN("malloc failed");
@@ -307,7 +307,7 @@ static int decode_instructions(struct objtool_file *file)
                        insn->offset = offset;
 
                        ret = arch_decode_instruction(file->elf, sec, offset,
-                                                     sec->len - offset,
+                                                     sec->sh.sh_size - offset,
                                                      &insn->len, &insn->type,
                                                      &insn->immediate,
                                                      &insn->stack_ops);
@@ -349,9 +349,9 @@ static struct instruction *find_last_insn(struct objtool_file *file,
 {
        struct instruction *insn = NULL;
        unsigned int offset;
-       unsigned int end = (sec->len > 10) ? sec->len - 10 : 0;
+       unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;
 
-       for (offset = sec->len - 1; offset >= end && !insn; offset--)
+       for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
                insn = find_insn(file, sec, offset);
 
        return insn;
@@ -389,7 +389,7 @@ static int add_dead_ends(struct objtool_file *file)
                insn = find_insn(file, reloc->sym->sec, reloc->addend);
                if (insn)
                        insn = list_prev_entry(insn, list);
-               else if (reloc->addend == reloc->sym->sec->len) {
+               else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
                        insn = find_last_insn(file, reloc->sym->sec);
                        if (!insn) {
                                WARN("can't find unreachable insn at %s+0x%x",
@@ -424,7 +424,7 @@ reachable:
                insn = find_insn(file, reloc->sym->sec, reloc->addend);
                if (insn)
                        insn = list_prev_entry(insn, list);
-               else if (reloc->addend == reloc->sym->sec->len) {
+               else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
                        insn = find_last_insn(file, reloc->sym->sec);
                        if (!insn) {
                                WARN("can't find reachable insn at %s+0x%x",
@@ -1561,14 +1561,14 @@ static int read_unwind_hints(struct objtool_file *file)
                return -1;
        }
 
-       if (sec->len % sizeof(struct unwind_hint)) {
+       if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
                WARN("struct unwind_hint size mismatch");
                return -1;
        }
 
        file->hints = true;
 
-       for (i = 0; i < sec->len / sizeof(struct unwind_hint); i++) {
+       for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
                hint = (struct unwind_hint *)sec->data->d_buf + i;
 
                reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
index 8676c75..b18f005 100644 (file)
@@ -286,10 +286,9 @@ static int read_sections(struct elf *elf)
                                return -1;
                        }
                }
-               sec->len = sec->sh.sh_size;
 
                if (sec->sh.sh_flags & SHF_EXECINSTR)
-                       elf->text_size += sec->len;
+                       elf->text_size += sec->sh.sh_size;
 
                list_add_tail(&sec->list, &elf->sections);
                elf_hash_add(section, &sec->hash, sec->idx);
@@ -734,8 +733,8 @@ static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
        data->d_size = strlen(str) + 1;
        data->d_align = 1;
 
-       len = strtab->len;
-       strtab->len += data->d_size;
+       len = strtab->sh.sh_size;
+       strtab->sh.sh_size += data->d_size;
        strtab->changed = true;
 
        return len;
@@ -790,9 +789,9 @@ struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
        data->d_align = 1;
        data->d_type = ELF_T_SYM;
 
-       sym->idx = symtab->len / sizeof(sym->sym);
+       sym->idx = symtab->sh.sh_size / sizeof(sym->sym);
 
-       symtab->len += data->d_size;
+       symtab->sh.sh_size += data->d_size;
        symtab->changed = true;
 
        symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
@@ -814,7 +813,7 @@ struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
                data->d_align = 4;
                data->d_type = ELF_T_WORD;
 
-               symtab_shndx->len += 4;
+               symtab_shndx->sh.sh_size += 4;
                symtab_shndx->changed = true;
        }
 
@@ -855,7 +854,6 @@ struct section *elf_create_section(struct elf *elf, const char *name,
        }
 
        sec->idx = elf_ndxscn(s);
-       sec->len = size;
        sec->changed = true;
 
        sec->data = elf_newdata(s);
index e343950..075d829 100644 (file)
@@ -38,7 +38,6 @@ struct section {
        Elf_Data *data;
        char *name;
        int idx;
-       unsigned int len;
        bool changed, text, rodata, noinstr;
 };
 
index dc9b7dd..b5865e2 100644 (file)
@@ -204,7 +204,7 @@ int orc_create(struct objtool_file *file)
 
                /* Add a section terminator */
                if (!empty) {
-                       orc_list_add(&orc_list, &null, sec, sec->len);
+                       orc_list_add(&orc_list, &null, sec, sec->sh.sh_size);
                        nr++;
                }
        }
index f1428e3..06c3eac 100644 (file)
@@ -58,22 +58,11 @@ void __weak arch_handle_alternative(unsigned short feature, struct special_alt *
 {
 }
 
-static bool reloc2sec_off(struct reloc *reloc, struct section **sec, unsigned long *off)
+static void reloc_to_sec_off(struct reloc *reloc, struct section **sec,
+                            unsigned long *off)
 {
-       switch (reloc->sym->type) {
-       case STT_FUNC:
-               *sec = reloc->sym->sec;
-               *off = reloc->sym->offset + reloc->addend;
-               return true;
-
-       case STT_SECTION:
-               *sec = reloc->sym->sec;
-               *off = reloc->addend;
-               return true;
-
-       default:
-               return false;
-       }
+       *sec = reloc->sym->sec;
+       *off = reloc->sym->offset + reloc->addend;
 }
 
 static int get_alt_entry(struct elf *elf, struct special_entry *entry,
@@ -109,13 +98,8 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
                WARN_FUNC("can't find orig reloc", sec, offset + entry->orig);
                return -1;
        }
-       if (!reloc2sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off)) {
-               WARN_FUNC("don't know how to handle reloc symbol type %d: %s",
-                          sec, offset + entry->orig,
-                          orig_reloc->sym->type,
-                          orig_reloc->sym->name);
-               return -1;
-       }
+
+       reloc_to_sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off);
 
        if (!entry->group || alt->new_len) {
                new_reloc = find_reloc_by_dest(elf, sec, offset + entry->new);
@@ -133,13 +117,7 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
                if (arch_is_retpoline(new_reloc->sym))
                        return 1;
 
-               if (!reloc2sec_off(new_reloc, &alt->new_sec, &alt->new_off)) {
-                       WARN_FUNC("don't know how to handle reloc symbol type %d: %s",
-                                 sec, offset + entry->new,
-                                 new_reloc->sym->type,
-                                 new_reloc->sym->name);
-                       return -1;
-               }
+               reloc_to_sec_off(new_reloc, &alt->new_sec, &alt->new_off);
 
                /* _ASM_EXTABLE_EX hack */
                if (alt->new_off >= 0x7ffffff0)
@@ -181,13 +159,13 @@ int special_get_alts(struct elf *elf, struct list_head *alts)
                if (!sec)
                        continue;
 
-               if (sec->len % entry->size != 0) {
+               if (sec->sh.sh_size % entry->size != 0) {
                        WARN("%s size not a multiple of %d",
                             sec->name, entry->size);
                        return -1;
                }
 
-               nr_entries = sec->len / entry->size;
+               nr_entries = sec->sh.sh_size / entry->size;
 
                for (idx = 0; idx < nr_entries; idx++) {
                        alt = malloc(sizeof(*alt));
index 5a93145..ac35c61 100755 (executable)
@@ -16,7 +16,7 @@ assert sys.version_info >= (3, 7), "Python version is too old"
 
 from collections import namedtuple
 from enum import Enum, auto
-from typing import Iterable
+from typing import Iterable, Sequence
 
 import kunit_config
 import kunit_json
@@ -186,6 +186,26 @@ def run_tests(linux: kunit_kernel.LinuxSourceTree,
                                exec_result.elapsed_time))
        return parse_result
 
+# Problem:
+# $ kunit.py run --json
+# works as one would expect and prints the parsed test results as JSON.
+# $ kunit.py run --json suite_name
+# would *not* pass suite_name as the filter_glob and print the results as JSON.
+# argparse instead treats it as another way of writing
+# $ kunit.py run --json=suite_name
+# i.e. it would run all tests and dump the JSON to a `suite_name` file.
+# As a workaround, we automatically rewrite a bare --json to --json=stdout.
+pseudo_bool_flag_defaults = {
+               '--json': 'stdout',
+               '--raw_output': 'kunit',
+}
+def massage_argv(argv: Sequence[str]) -> Sequence[str]:
+       def massage_arg(arg: str) -> str:
+               if arg not in pseudo_bool_flag_defaults:
+                       return arg
+               return f'{arg}={pseudo_bool_flag_defaults[arg]}'
+       return list(map(massage_arg, argv))
+
 def add_common_opts(parser) -> None:
        parser.add_argument('--build_dir',
                            help='As in the make command, it specifies the build '
@@ -303,7 +323,7 @@ def main(argv, linux=None):
                                  help='Specifies the file to read results from.',
                                  type=str, nargs='?', metavar='input_file')
 
-       cli_args = parser.parse_args(argv)
+       cli_args = parser.parse_args(massage_argv(argv))
 
        if get_kernel_root_path():
                os.chdir(get_kernel_root_path())
index 619c455..1edcc83 100755 (executable)
@@ -408,6 +408,14 @@ class KUnitMainTest(unittest.TestCase):
                        self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
                        self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
 
+       def test_run_raw_output_does_not_take_positional_args(self):
+               # --raw_output is a string flag, but it must not consume the
+               # following positional argument; it should only take a value given via '='
+               self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
+               kunit.main(['run', '--raw_output', 'filter_glob'], self.linux_source_mock)
+               self.linux_source_mock.run_kernel.assert_called_once_with(
+                       args=None, build_dir='.kunit', filter_glob='filter_glob', timeout=300)
+
        def test_exec_timeout(self):
                timeout = 3453
                kunit.main(['exec', '--timeout', str(timeout)], self.linux_source_mock)
index 13350cd..8e67a25 100755 (executable)
@@ -289,6 +289,12 @@ set_sysctl()
        run_cmd sysctl -q -w $*
 }
 
+# get sysctl values in NS-A
+get_sysctl()
+{
+       ${NSA_CMD} sysctl -n $*
+}
+
 ################################################################################
 # Setup for tests
 
@@ -1003,6 +1009,60 @@ ipv4_tcp_md5()
        run_cmd nettest -s -I ${NSA_DEV} -M ${MD5_PW} -m ${NS_NET}
        log_test $? 1 "MD5: VRF: Device must be a VRF - prefix"
 
+       test_ipv4_md5_vrf__vrf_server__no_bind_ifindex
+       test_ipv4_md5_vrf__global_server__bind_ifindex0
+}
+
+test_ipv4_md5_vrf__vrf_server__no_bind_ifindex()
+{
+       log_start
+       show_hint "Simulates applications using VRF without TCP_MD5SIG_FLAG_IFINDEX"
+       run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex &
+       sleep 1
+       run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
+       log_test $? 0 "MD5: VRF: VRF-bound server, unbound key accepts connection"
+
+       log_start
+       show_hint "Binding both the socket and the key is not required but it works"
+       run_cmd nettest -s -I ${VRF} -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex &
+       sleep 1
+       run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
+       log_test $? 0 "MD5: VRF: VRF-bound server, bound key accepts connection"
+}
+
+test_ipv4_md5_vrf__global_server__bind_ifindex0()
+{
+       # This particular test needs tcp_l3mdev_accept=1 for the global server to accept VRF connections
+       local old_tcp_l3mdev_accept
+       old_tcp_l3mdev_accept=$(get_sysctl net.ipv4.tcp_l3mdev_accept)
+       set_sysctl net.ipv4.tcp_l3mdev_accept=1
+
+       log_start
+       run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex &
+       sleep 1
+       run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
+       log_test $? 2 "MD5: VRF: Global server, key bound to ifindex=0 rejects VRF connection"
+
+       log_start
+       run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --force-bind-key-ifindex &
+       sleep 1
+       run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_PW}
+       log_test $? 0 "MD5: VRF: Global server, key bound to ifindex=0 accepts non-VRF connection"
+
+       log_start
+       run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex &
+       sleep 1
+       run_cmd_nsb nettest -r ${NSA_IP} -X ${MD5_PW}
+       log_test $? 0 "MD5: VRF: Global server, key not bound to ifindex accepts VRF connection"
+
+       log_start
+       run_cmd nettest -s -M ${MD5_PW} -m ${NS_NET} --no-bind-key-ifindex &
+       sleep 1
+       run_cmd_nsc nettest -r ${NSA_IP} -X ${MD5_PW}
+       log_test $? 0 "MD5: VRF: Global server, key not bound to ifindex accepts non-VRF connection"
+
+       # restore value
+       set_sysctl net.ipv4.tcp_l3mdev_accept="$old_tcp_l3mdev_accept"
 }
 
 ipv4_tcp_novrf()
index d97bd68..72ee644 100644 (file)
@@ -9,6 +9,7 @@ TEST_PROGS = bridge_igmp.sh \
        gre_inner_v4_multipath.sh \
        gre_inner_v6_multipath.sh \
        gre_multipath.sh \
+       ip6_forward_instats_vrf.sh \
        ip6gre_inner_v4_multipath.sh \
        ip6gre_inner_v6_multipath.sh \
        ipip_flat_gre_key.sh \
index b802c14..e5e2fbe 100644 (file)
@@ -39,3 +39,5 @@ NETIF_CREATE=yes
 # Timeout (in seconds) before ping exits regardless of how many packets have
 # been sent or received
 PING_TIMEOUT=5
+# IPv6 traceroute utility name.
+TROUTE6=traceroute6
diff --git a/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh b/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh
new file mode 100755 (executable)
index 0000000..9f5b3e2
--- /dev/null
@@ -0,0 +1,172 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IPv6 stats on the incoming interface when forwarding with VRF
+
+ALL_TESTS="
+       ipv6_ping
+       ipv6_in_too_big_err
+       ipv6_in_hdr_err
+       ipv6_in_addr_err
+       ipv6_in_discard
+"
+
+NUM_NETIFS=4
+source lib.sh
+
+h1_create()
+{
+       simple_if_init $h1 2001:1:1::2/64
+       ip -6 route add vrf v$h1 2001:1:2::/64 via 2001:1:1::1
+}
+
+h1_destroy()
+{
+       ip -6 route del vrf v$h1 2001:1:2::/64 via 2001:1:1::1
+       simple_if_fini $h1 2001:1:1::2/64
+}
+
+router_create()
+{
+       vrf_create router
+       __simple_if_init $rtr1 router 2001:1:1::1/64
+       __simple_if_init $rtr2 router 2001:1:2::1/64
+       mtu_set $rtr2 1280
+}
+
+router_destroy()
+{
+       mtu_restore $rtr2
+       __simple_if_fini $rtr2 2001:1:2::1/64
+       __simple_if_fini $rtr1 2001:1:1::1/64
+       vrf_destroy router
+}
+
+h2_create()
+{
+       simple_if_init $h2 2001:1:2::2/64
+       ip -6 route add vrf v$h2 2001:1:1::/64 via 2001:1:2::1
+       mtu_set $h2 1280
+}
+
+h2_destroy()
+{
+       mtu_restore $h2
+       ip -6 route del vrf v$h2 2001:1:1::/64 via 2001:1:2::1
+       simple_if_fini $h2 2001:1:2::2/64
+}
+
+setup_prepare()
+{
+       h1=${NETIFS[p1]}
+       rtr1=${NETIFS[p2]}
+
+       rtr2=${NETIFS[p3]}
+       h2=${NETIFS[p4]}
+
+       vrf_prepare
+       h1_create
+       router_create
+       h2_create
+
+       forwarding_enable
+}
+
+cleanup()
+{
+       pre_cleanup
+
+       forwarding_restore
+
+       h2_destroy
+       router_destroy
+       h1_destroy
+       vrf_cleanup
+}
+
+ipv6_in_too_big_err()
+{
+       RET=0
+
+       local t0=$(ipv6_stats_get $rtr1 Ip6InTooBigErrors)
+       local vrf_name=$(master_name_get $h1)
+
+       # Send packets that are too big for the egress MTU
+       ip vrf exec $vrf_name \
+               $PING6 -s 1300 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
+
+       local t1=$(ipv6_stats_get $rtr1 Ip6InTooBigErrors)
+       test "$((t1 - t0))" -ne 0
+       check_err $?
+       log_test "Ip6InTooBigErrors"
+}
+
+ipv6_in_hdr_err()
+{
+       RET=0
+
+       local t0=$(ipv6_stats_get $rtr1 Ip6InHdrErrors)
+       local vrf_name=$(master_name_get $h1)
+
+       # Send packets with hop limit 1; traceroute6 is the easiest way, as some
+       # ping6 implementations don't allow the hop limit to be specified
+       ip vrf exec $vrf_name \
+               $TROUTE6 2001:1:2::2 &> /dev/null
+
+       local t1=$(ipv6_stats_get $rtr1 Ip6InHdrErrors)
+       test "$((t1 - t0))" -ne 0
+       check_err $?
+       log_test "Ip6InHdrErrors"
+}
+
+ipv6_in_addr_err()
+{
+       RET=0
+
+       local t0=$(ipv6_stats_get $rtr1 Ip6InAddrErrors)
+       local vrf_name=$(master_name_get $h1)
+
+       # Temporarily disable forwarding while sending the packet
+       sysctl -qw net.ipv6.conf.all.forwarding=0
+       ip vrf exec $vrf_name \
+               $PING6 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
+       sysctl -qw net.ipv6.conf.all.forwarding=1
+
+       local t1=$(ipv6_stats_get $rtr1 Ip6InAddrErrors)
+       test "$((t1 - t0))" -ne 0
+       check_err $?
+       log_test "Ip6InAddrErrors"
+}
+
+ipv6_in_discard()
+{
+       RET=0
+
+       local t0=$(ipv6_stats_get $rtr1 Ip6InDiscards)
+       local vrf_name=$(master_name_get $h1)
+
+       # Add an xfrm policy to discard forwarded packets
+       ip xfrm policy add dst 2001:1:2::2/128 dir fwd action block
+       ip vrf exec $vrf_name \
+               $PING6 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
+       ip xfrm policy del dst 2001:1:2::2/128 dir fwd
+
+       local t1=$(ipv6_stats_get $rtr1 Ip6InDiscards)
+       test "$((t1 - t0))" -ne 0
+       check_err $?
+       log_test "Ip6InDiscards"
+}
+
+ipv6_ping()
+{
+       RET=0
+
+       ping6_test $h1 2001:1:2::2
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
index e7fc5c3..92087d4 100644 (file)
@@ -751,6 +751,14 @@ qdisc_parent_stats_get()
            | jq '.[] | select(.parent == "'"$parent"'") | '"$selector"
 }
 
+ipv6_stats_get()
+{
+       local dev=$1; shift
+       local stat=$1; shift
+
+       cat /proc/net/dev_snmp6/$dev | grep "^$stat" | cut -f2
+}
+
 humanize()
 {
        local speed=$1; shift
index bd62883..b599003 100644 (file)
@@ -28,6 +28,7 @@
 #include <unistd.h>
 #include <time.h>
 #include <errno.h>
+#include <getopt.h>
 
 #include <linux/xfrm.h>
 #include <linux/ipsec.h>
@@ -101,6 +102,8 @@ struct sock_args {
                struct sockaddr_in6 v6;
        } md5_prefix;
        unsigned int prefix_len;
+       /* 0: default, -1: force off, +1: force on */
+       int bind_key_ifindex;
 
        /* expected addresses and device index for connection */
        const char *expected_dev;
@@ -271,11 +274,14 @@ static int tcp_md5sig(int sd, void *addr, socklen_t alen, struct sock_args *args
        }
        memcpy(&md5sig.tcpm_addr, addr, alen);
 
-       if (args->ifindex) {
+       if ((args->ifindex && args->bind_key_ifindex >= 0) || args->bind_key_ifindex >= 1) {
                opt = TCP_MD5SIG_EXT;
                md5sig.tcpm_flags |= TCP_MD5SIG_FLAG_IFINDEX;
 
                md5sig.tcpm_ifindex = args->ifindex;
+               log_msg("TCP_MD5SIG_FLAG_IFINDEX set tcpm_ifindex=%d\n", md5sig.tcpm_ifindex);
+       } else {
+               log_msg("TCP_MD5SIG_FLAG_IFINDEX off\n", md5sig.tcpm_ifindex);
        }
 
        rc = setsockopt(sd, IPPROTO_TCP, opt, &md5sig, sizeof(md5sig));
@@ -1822,6 +1828,14 @@ static int ipc_parent(int cpid, int fd, struct sock_args *args)
 }
 
 #define GETOPT_STR  "sr:l:c:p:t:g:P:DRn:M:X:m:d:I:BN:O:SCi6xL:0:1:2:3:Fbq"
+#define OPT_FORCE_BIND_KEY_IFINDEX 1001
+#define OPT_NO_BIND_KEY_IFINDEX 1002
+
+static struct option long_opts[] = {
+       {"force-bind-key-ifindex", 0, 0, OPT_FORCE_BIND_KEY_IFINDEX},
+       {"no-bind-key-ifindex", 0, 0, OPT_NO_BIND_KEY_IFINDEX},
+       {0, 0, 0, 0}
+};
 
 static void print_usage(char *prog)
 {
@@ -1858,6 +1872,10 @@ static void print_usage(char *prog)
        "    -M password   use MD5 sum protection\n"
        "    -X password   MD5 password for client mode\n"
        "    -m prefix/len prefix and length to use for MD5 key\n"
+       "    --no-bind-key-ifindex: Force TCP_MD5SIG_FLAG_IFINDEX off\n"
+       "    --force-bind-key-ifindex: Force TCP_MD5SIG_FLAG_IFINDEX on\n"
+       "        (default: only if -I is passed)\n"
+       "\n"
        "    -g grp        multicast group (e.g., 239.1.1.1)\n"
        "    -i            interactive mode (default is echo and terminate)\n"
        "\n"
@@ -1893,7 +1911,7 @@ int main(int argc, char *argv[])
         * process input args
         */
 
-       while ((rc = getopt(argc, argv, GETOPT_STR)) != -1) {
+       while ((rc = getopt_long(argc, argv, GETOPT_STR, long_opts, NULL)) != -1) {
                switch (rc) {
                case 'B':
                        both_mode = 1;
@@ -1966,6 +1984,12 @@ int main(int argc, char *argv[])
                case 'M':
                        args.password = optarg;
                        break;
+               case OPT_FORCE_BIND_KEY_IFINDEX:
+                       args.bind_key_ifindex = 1;
+                       break;
+               case OPT_NO_BIND_KEY_IFINDEX:
+                       args.bind_key_ifindex = -1;
+                       break;
                case 'X':
                        args.client_pw = optarg;
                        break;