Merge tag 'iommu-fix-v6.1-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro...
authorLinus Torvalds <torvalds@linux-foundation.org>
Sun, 11 Dec 2022 17:49:39 +0000 (09:49 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sun, 11 Dec 2022 17:49:39 +0000 (09:49 -0800)
Pull iommu fix from Joerg Roedel:

 - Fix device mask to catch all affected devices in the recently added
   quirk for QAT devices in the Intel VT-d driver.

* tag 'iommu-fix-v6.1-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/vt-d: Fix buggy QAT device mask

142 files changed:
.clang-format
.mailmap
Documentation/loongarch/booting.rst [new file with mode: 0644]
Documentation/loongarch/index.rst
Documentation/translations/zh_CN/loongarch/booting.rst [new file with mode: 0644]
Documentation/translations/zh_CN/loongarch/index.rst
Documentation/virt/kvm/api.rst
Documentation/virt/kvm/halt-polling.rst [moved from Documentation/virt/kvm/x86/halt-polling.rst with 92% similarity]
Documentation/virt/kvm/index.rst
Documentation/virt/kvm/x86/index.rst
MAINTAINERS
arch/arm/boot/dts/imx7s.dtsi
arch/arm/mach-at91/sama5.c
arch/arm/mm/fault.c
arch/arm/mm/fault.h
arch/arm64/mm/dma-mapping.c
arch/loongarch/include/asm/smp.h
arch/loongarch/kernel/smp.c
arch/loongarch/mm/tlbex.S
arch/s390/kvm/vsie.c
arch/x86/kvm/x86.c
drivers/ata/libahci_platform.c
drivers/bluetooth/btusb.c
drivers/gpio/gpio-amd8111.c
drivers/gpio/gpio-rockchip.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
drivers/gpu/drm/bridge/ti-sn65dsi86.c
drivers/gpu/drm/drm_gem_shmem_helper.c
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-ite.c
drivers/hid/hid-lg4ff.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-quirks.c
drivers/hid/hid-uclogic-core.c
drivers/hid/hid-uclogic-rdesc.c
drivers/hid/i2c-hid/Kconfig
drivers/media/common/videobuf2/videobuf2-core.c
drivers/media/v4l2-core/v4l2-dv-timings.c
drivers/net/bonding/bond_main.c
drivers/net/can/can327.c
drivers/net/can/slcan/slcan-core.c
drivers/net/can/usb/esd_usb.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/sja1105/sja1105_devlink.c
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/ethernet/aeroflex/greth.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hisi_femac.c
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
drivers/net/ethernet/microchip/encx24j600-regmap.c
drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
drivers/net/ethernet/microchip/sparx5/sparx5_main.c
drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
drivers/net/ethernet/microsoft/mana/gdma.h
drivers/net/ethernet/microsoft/mana/mana_en.c
drivers/net/ethernet/netronome/nfp/nfdk/dp.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/ti/am65-cpsw-nuss.c
drivers/net/ieee802154/ca8210.c
drivers/net/ieee802154/cc2520.c
drivers/net/macsec.c
drivers/net/mdio/fwnode_mdio.c
drivers/net/mdio/of_mdio.c
drivers/net/phy/mdio_device.c
drivers/net/phy/mxl-gpy.c
drivers/net/plip/plip.c
drivers/net/thunderbolt.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/wwan/iosm/iosm_ipc_mux.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netback/rx.c
drivers/net/xen-netfront.c
drivers/nvme/host/core.c
drivers/platform/x86/amd/pmc.c
drivers/s390/net/qeth_l2_main.c
fs/fscache/cookie.c
include/linux/cgroup.h
include/linux/swapops.h
include/net/bluetooth/hci.h
include/net/ping.h
include/trace/events/fscache.h
io_uring/io_uring.c
ipc/sem.c
kernel/cgroup/cgroup-internal.h
kernel/sysctl.c
mm/gup.c
mm/memcontrol.c
mm/mmap.c
mm/shmem.c
net/bluetooth/6lowpan.c
net/bluetooth/af_bluetooth.c
net/bluetooth/hci_codec.c
net/bluetooth/hci_core.c
net/bluetooth/hci_request.c
net/bluetooth/hci_sync.c
net/bluetooth/iso.c
net/bluetooth/l2cap_core.c
net/can/af_can.c
net/dsa/tag_hellcreek.c
net/dsa/tag_ksz.c
net/dsa/tag_sja1105.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/ip_gre.c
net/ipv4/ping.c
net/ipv6/ip6_output.c
net/mac802154/iface.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_flow_table_offload.c
net/netfilter/nft_set_pipapo.c
net/nfc/nci/ntf.c
net/tipc/link.c
net/tipc/node.c
net/unix/diag.c
tools/testing/selftests/cgroup/test_kmem.c
tools/testing/selftests/net/.gitignore
tools/testing/selftests/net/af_unix/Makefile
tools/testing/selftests/net/af_unix/diag_uid.c [new file with mode: 0644]
tools/testing/selftests/net/config
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/rtnetlink.sh
tools/testing/selftests/net/toeplitz.sh

index 1247d54..8d01225 100644 (file)
@@ -535,6 +535,7 @@ ForEachMacros:
   - 'perf_hpp_list__for_each_sort_list_safe'
   - 'perf_pmu__for_each_hybrid_pmu'
   - 'ping_portaddr_for_each_entry'
+  - 'ping_portaddr_for_each_entry_rcu'
   - 'plist_for_each'
   - 'plist_for_each_continue'
   - 'plist_for_each_entry'
index bbb6a07..2301c29 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -287,6 +287,7 @@ Matthew Wilcox <willy@infradead.org> <willy@linux.intel.com>
 Matthew Wilcox <willy@infradead.org> <willy@parisc-linux.org>
 Matthias Fuchs <socketcan@esd.eu> <matthias.fuchs@esd.eu>
 Matthieu CASTET <castet.matthieu@free.fr>
+Matti Vaittinen <mazziesaccount@gmail.com> <matti.vaittinen@fi.rohmeurope.com>
 Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
 Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.com>
 Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
@@ -372,6 +373,8 @@ Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
 Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
 Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
 Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>
+Muchun Song <muchun.song@linux.dev> <songmuchun@bytedance.com>
+Muchun Song <muchun.song@linux.dev> <smuchun@gmail.com>
 Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
 Rudolf Marek <R.Marek@sh.cvut.cz>
 Rui Saraiva <rmps@joel.ist.utl.pt>
diff --git a/Documentation/loongarch/booting.rst b/Documentation/loongarch/booting.rst
new file mode 100644 (file)
index 0000000..91eccd4
--- /dev/null
@@ -0,0 +1,42 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================
+Booting Linux/LoongArch
+=======================
+
+:Author: Yanteng Si <siyanteng@loongson.cn>
+:Date:   18 Nov 2022
+
+Information passed from BootLoader to kernel
+============================================
+
+LoongArch supports ACPI and FDT. The information that needs to be passed
+to the kernel includes the memmap, the initrd, the command line, optionally
+the ACPI/FDT tables, and so on.
+
+The kernel is passed the following arguments on `kernel_entry` :
+
+      - a0 = efi_boot: `efi_boot` is a flag indicating whether
+        this boot environment is fully UEFI-compliant.
+
+      - a1 = cmdline: `cmdline` is a pointer to the kernel command line.
+
+      - a2 = systemtable: `systemtable` points to the EFI system table.
+        All pointers involved at this stage are in physical addresses.
+
+Header of Linux/LoongArch kernel images
+=======================================
+
+Linux/LoongArch kernel images are EFI images. Being PE files, they have
+a 64-byte header structured like::
+
+       u32     MZ_MAGIC                /* "MZ", MS-DOS header */
+       u32     res0 = 0                /* Reserved */
+       u64     kernel_entry            /* Kernel entry point */
+       u64     _end - _text            /* Kernel image effective size */
+       u64     load_offset             /* Kernel image load offset from start of RAM */
+       u64     res1 = 0                /* Reserved */
+       u64     res2 = 0                /* Reserved */
+       u64     res3 = 0                /* Reserved */
+       u32     LINUX_PE_MAGIC          /* Magic number */
+       u32     pe_header - _head       /* Offset to the PE header */
index aaba648..c779bfa 100644 (file)
@@ -9,6 +9,7 @@ LoongArch Architecture
    :numbered:
 
    introduction
+   booting
    irq-chip-model
 
    features
diff --git a/Documentation/translations/zh_CN/loongarch/booting.rst b/Documentation/translations/zh_CN/loongarch/booting.rst
new file mode 100644 (file)
index 0000000..fb6440c
--- /dev/null
@@ -0,0 +1,48 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/loongarch/booting.rst
+
+:翻译:
+
+ 司延腾 Yanteng Si <siyanteng@loongson.cn>
+
+====================
+启动 Linux/LoongArch
+====================
+
+:作者: 司延腾 <siyanteng@loongson.cn>
+:日期: 2022年11月18日
+
+BootLoader传递给内核的信息
+==========================
+
+LoongArch支持ACPI和FDT启动,需要传递给内核的信息包括memmap、initrd、cmdline、可
+选的ACPI/FDT表等。
+
+内核在 `kernel_entry` 入口处被传递以下参数:
+
+      - a0 = efi_boot: `efi_boot` 是一个标志,表示这个启动环境是否完全符合UEFI
+        的要求。
+
+      - a1 = cmdline: `cmdline` 是一个指向内核命令行的指针。
+
+      - a2 = systemtable: `systemtable` 指向EFI的系统表,在这个阶段涉及的所有
+        指针都是物理地址。
+
+Linux/LoongArch内核镜像文件头
+=============================
+
+内核镜像是EFI镜像。作为PE文件,它们有一个64字节的头部结构体,如下所示::
+
+       u32     MZ_MAGIC                /* "MZ", MS-DOS 头 */
+       u32     res0 = 0                /* 保留 */
+       u64     kernel_entry            /* 内核入口点 */
+       u64     _end - _text            /* 内核镜像有效大小 */
+       u64     load_offset             /* 加载内核镜像相对内存起始地址的偏移量 */
+       u64     res1 = 0                /* 保留 */
+       u64     res2 = 0                /* 保留 */
+       u64     res3 = 0                /* 保留 */
+       u32     LINUX_PE_MAGIC          /* 魔术数 */
+       u32     pe_header - _head       /* 到PE头的偏移量 */
index 7d23eb7..0273a08 100644 (file)
@@ -14,6 +14,7 @@ LoongArch体系结构
    :numbered:
 
    introduction
+   booting
    irq-chip-model
 
    features
index eee9f85..896914e 100644 (file)
@@ -7213,14 +7213,13 @@ veto the transition.
 :Parameters: args[0] is the maximum poll time in nanoseconds
 :Returns: 0 on success; -1 on error
 
-This capability overrides the kvm module parameter halt_poll_ns for the
-target VM.
-
-VCPU polling allows a VCPU to poll for wakeup events instead of immediately
-scheduling during guest halts. The maximum time a VCPU can spend polling is
-controlled by the kvm module parameter halt_poll_ns. This capability allows
-the maximum halt time to specified on a per-VM basis, effectively overriding
-the module parameter for the target VM.
+KVM_CAP_HALT_POLL overrides the kvm.halt_poll_ns module parameter to set the
+maximum halt-polling time for all vCPUs in the target VM. This capability can
+be invoked at any time and any number of times to dynamically change the
+maximum halt-polling time.
+
+See Documentation/virt/kvm/halt-polling.rst for more information on halt
+polling.
 
 7.21 KVM_CAP_X86_USER_SPACE_MSR
 -------------------------------
similarity index 92%
rename from Documentation/virt/kvm/x86/halt-polling.rst
rename to Documentation/virt/kvm/halt-polling.rst
index 4922e4a..3fae39b 100644 (file)
@@ -119,6 +119,19 @@ These module parameters can be set from the debugfs files in:
 Note: that these module parameters are system wide values and are not able to
       be tuned on a per vm basis.
 
+Any changes to these parameters will be picked up by new and existing vCPUs the
+next time they halt, with the notable exception of VMs using KVM_CAP_HALT_POLL
+(see next section).
+
+KVM_CAP_HALT_POLL
+=================
+
+KVM_CAP_HALT_POLL is a VM capability that allows userspace to override halt_poll_ns
+on a per-VM basis. VMs using KVM_CAP_HALT_POLL ignore halt_poll_ns completely (but
+still obey halt_poll_ns_grow, halt_poll_ns_grow_start, and halt_poll_ns_shrink).
+
+See Documentation/virt/kvm/api.rst for more information on this capability.
+
 Further Notes
 =============
 
index e0a2c74..ad13ec5 100644 (file)
@@ -17,4 +17,5 @@ KVM
 
    locking
    vcpu-requests
+   halt-polling
    review-checklist
index 7ff5888..9ece6b8 100644 (file)
@@ -10,7 +10,6 @@ KVM for x86 systems
    amd-memory-encryption
    cpuid
    errata
-   halt-polling
    hypercalls
    mmu
    msr
index 1daadaa..886d3f6 100644 (file)
@@ -5299,7 +5299,7 @@ M:        Johannes Weiner <hannes@cmpxchg.org>
 M:     Michal Hocko <mhocko@kernel.org>
 M:     Roman Gushchin <roman.gushchin@linux.dev>
 M:     Shakeel Butt <shakeelb@google.com>
-R:     Muchun Song <songmuchun@bytedance.com>
+R:     Muchun Song <muchun.song@linux.dev>
 L:     cgroups@vger.kernel.org
 L:     linux-mm@kvack.org
 S:     Maintained
@@ -9439,7 +9439,7 @@ F:        drivers/net/ethernet/huawei/hinic/
 
 HUGETLB SUBSYSTEM
 M:     Mike Kravetz <mike.kravetz@oracle.com>
-M:     Muchun Song <songmuchun@bytedance.com>
+M:     Muchun Song <muchun.song@linux.dev>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     Documentation/ABI/testing/sysfs-kernel-mm-hugepages
index 03d2e85..0fc9e6b 100644 (file)
                        clocks = <&clks IMX7D_NAND_USDHC_BUS_RAWNAND_CLK>;
                };
 
-               gpmi: nand-controller@33002000 {
+               gpmi: nand-controller@33002000{
                        compatible = "fsl,imx7d-gpmi-nand";
                        #address-cells = <1>;
-                       #size-cells = <0>;
+                       #size-cells = <1>;
                        reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
                        reg-names = "gpmi-nand", "bch";
                        interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
index 67ed68f..bf2b5c6 100644 (file)
@@ -26,7 +26,7 @@ static void sama5_l2c310_write_sec(unsigned long val, unsigned reg)
 static void __init sama5_secure_cache_init(void)
 {
        sam_secure_init();
-       if (sam_linux_is_optee_available())
+       if (IS_ENABLED(CONFIG_OUTER_CACHE) && sam_linux_is_optee_available())
                outer_cache.write_sec = sama5_l2c310_write_sec;
 }
 
index 46cccd6..de988cb 100644 (file)
@@ -105,6 +105,19 @@ static inline bool is_write_fault(unsigned int fsr)
        return (fsr & FSR_WRITE) && !(fsr & FSR_CM);
 }
 
+static inline bool is_translation_fault(unsigned int fsr)
+{
+       int fs = fsr_fs(fsr);
+#ifdef CONFIG_ARM_LPAE
+       if ((fs & FS_MMU_NOLL_MASK) == FS_TRANS_NOLL)
+               return true;
+#else
+       if (fs == FS_L1_TRANS || fs == FS_L2_TRANS)
+               return true;
+#endif
+       return false;
+}
+
 static void die_kernel_fault(const char *msg, struct mm_struct *mm,
                             unsigned long addr, unsigned int fsr,
                             struct pt_regs *regs)
@@ -140,7 +153,8 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
        if (addr < PAGE_SIZE) {
                msg = "NULL pointer dereference";
        } else {
-               if (kfence_handle_page_fault(addr, is_write_fault(fsr), regs))
+               if (is_translation_fault(fsr) &&
+                   kfence_handle_page_fault(addr, is_write_fault(fsr), regs))
                        return;
 
                msg = "paging request";
@@ -208,7 +222,7 @@ static inline bool is_permission_fault(unsigned int fsr)
 {
        int fs = fsr_fs(fsr);
 #ifdef CONFIG_ARM_LPAE
-       if ((fs & FS_PERM_NOLL_MASK) == FS_PERM_NOLL)
+       if ((fs & FS_MMU_NOLL_MASK) == FS_PERM_NOLL)
                return true;
 #else
        if (fs == FS_L1_PERM || fs == FS_L2_PERM)
index 83b5ab3..54927ba 100644 (file)
@@ -14,8 +14,9 @@
 
 #ifdef CONFIG_ARM_LPAE
 #define FSR_FS_AEA             17
+#define FS_TRANS_NOLL          0x4
 #define FS_PERM_NOLL           0xC
-#define FS_PERM_NOLL_MASK      0x3C
+#define FS_MMU_NOLL_MASK       0x3C
 
 static inline int fsr_fs(unsigned int fsr)
 {
@@ -23,8 +24,10 @@ static inline int fsr_fs(unsigned int fsr)
 }
 #else
 #define FSR_FS_AEA             22
-#define FS_L1_PERM             0xD
-#define FS_L2_PERM             0xF
+#define FS_L1_TRANS            0x5
+#define FS_L2_TRANS            0x7
+#define FS_L1_PERM             0xD
+#define FS_L2_PERM             0xF
 
 static inline int fsr_fs(unsigned int fsr)
 {
index 3cb101e..5240f6a 100644 (file)
@@ -36,7 +36,22 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 {
        unsigned long start = (unsigned long)page_address(page);
 
-       dcache_clean_poc(start, start + size);
+       /*
+        * The architecture only requires a clean to the PoC here in order to
+        * meet the requirements of the DMA API. However, some vendors (i.e.
+        * Qualcomm) abuse the DMA API for transferring buffers from the
+        * non-secure to the secure world, resetting the system if a non-secure
+        * access shows up after the buffer has been transferred:
+        *
+        * https://lore.kernel.org/r/20221114110329.68413-1-manivannan.sadhasivam@linaro.org
+        *
+        * Using clean+invalidate appears to make this issue less likely, but
+        * the drivers themselves still need fixing as the CPU could issue a
+        * speculative read from the buffer via the linear mapping irrespective
+        * of the cache maintenance we use. Once the drivers are fixed, we can
+        * relax this to a clean operation.
+        */
+       dcache_clean_inval_poc(start, start + size);
 }
 
 #ifdef CONFIG_IOMMU_DMA
index 3dd172d..d826873 100644 (file)
@@ -78,16 +78,6 @@ extern void calculate_cpu_foreign_map(void);
  */
 extern void show_ipi_list(struct seq_file *p, int prec);
 
-/*
- * This function sends a 'reschedule' IPI to another CPU.
- * it goes straight through and wastes no time serializing
- * anything. Worst case is that we lose a reschedule ...
- */
-static inline void smp_send_reschedule(int cpu)
-{
-       loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
-}
-
 static inline void arch_send_call_function_single_ipi(int cpu)
 {
        loongson_send_ipi_single(cpu, SMP_CALL_FUNCTION);
index 6ed72f7..14508d4 100644 (file)
@@ -149,6 +149,17 @@ void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
                ipi_write_action(cpu_logical_map(i), (u32)action);
 }
 
+/*
+ * This function sends a 'reschedule' IPI to another CPU.
+ * it goes straight through and wastes no time serializing
+ * anything. Worst case is that we lose a reschedule ...
+ */
+void smp_send_reschedule(int cpu)
+{
+       loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
+}
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
+
 irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
 {
        unsigned int action;
index d8ee8fb..58781c6 100644 (file)
@@ -10,6 +10,8 @@
 #include <asm/regdef.h>
 #include <asm/stackframe.h>
 
+#define INVTLB_ADDR_GFALSE_AND_ASID    5
+
 #define PTRS_PER_PGD_BITS      (PAGE_SHIFT - 3)
 #define PTRS_PER_PUD_BITS      (PAGE_SHIFT - 3)
 #define PTRS_PER_PMD_BITS      (PAGE_SHIFT - 3)
@@ -136,13 +138,10 @@ tlb_huge_update_load:
        ori             t0, ra, _PAGE_VALID
        st.d            t0, t1, 0
 #endif
-       tlbsrch
-       addu16i.d       t1, zero, -(CSR_TLBIDX_EHINV >> 16)
-       addi.d          ra, t1, 0
-       csrxchg         ra, t1, LOONGARCH_CSR_TLBIDX
-       tlbwr
-
-       csrxchg         zero, t1, LOONGARCH_CSR_TLBIDX
+       csrrd           ra, LOONGARCH_CSR_ASID
+       csrrd           t1, LOONGARCH_CSR_BADV
+       andi            ra, ra, CSR_ASID_ASID
+       invtlb          INVTLB_ADDR_GFALSE_AND_ASID, ra, t1
 
        /*
         * A huge PTE describes an area the size of the
@@ -287,13 +286,11 @@ tlb_huge_update_store:
        ori             t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
        st.d            t0, t1, 0
 #endif
-       tlbsrch
-       addu16i.d       t1, zero, -(CSR_TLBIDX_EHINV >> 16)
-       addi.d          ra, t1, 0
-       csrxchg         ra, t1, LOONGARCH_CSR_TLBIDX
-       tlbwr
+       csrrd           ra, LOONGARCH_CSR_ASID
+       csrrd           t1, LOONGARCH_CSR_BADV
+       andi            ra, ra, CSR_ASID_ASID
+       invtlb          INVTLB_ADDR_GFALSE_AND_ASID, ra, t1
 
-       csrxchg         zero, t1, LOONGARCH_CSR_TLBIDX
        /*
         * A huge PTE describes an area the size of the
         * configured huge page size. This is twice the
@@ -436,6 +433,11 @@ tlb_huge_update_modify:
        ori             t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
        st.d            t0, t1, 0
 #endif
+       csrrd           ra, LOONGARCH_CSR_ASID
+       csrrd           t1, LOONGARCH_CSR_BADV
+       andi            ra, ra, CSR_ASID_ASID
+       invtlb          INVTLB_ADDR_GFALSE_AND_ASID, ra, t1
+
        /*
         * A huge PTE describes an area the size of the
         * configured huge page size. This is twice the
@@ -466,7 +468,7 @@ tlb_huge_update_modify:
        addu16i.d       t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
        csrxchg         t1, t0, LOONGARCH_CSR_TLBIDX
 
-       tlbwr
+       tlbfill
 
        /* Reset default page size */
        addu16i.d       t0, zero, (CSR_TLBIDX_PS >> 16)
index 94138f8..ace2541 100644 (file)
@@ -546,8 +546,10 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
                scb_s->eca |= scb_o->eca & ECA_CEI;
        /* Epoch Extension */
-       if (test_kvm_facility(vcpu->kvm, 139))
+       if (test_kvm_facility(vcpu->kvm, 139)) {
                scb_s->ecd |= scb_o->ecd & ECD_MEF;
+               scb_s->epdx = scb_o->epdx;
+       }
 
        /* etoken */
        if (test_kvm_facility(vcpu->kvm, 156))
index 2835bd7..69227f7 100644 (file)
@@ -10574,8 +10574,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                                vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
                                vcpu->mmio_needed = 0;
                                r = 0;
+                               goto out;
                        }
-                       goto out;
                }
                if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
                        /* Page is swapped out. Do synthetic halt */
index ddf17e2..b9e336b 100644 (file)
@@ -109,7 +109,7 @@ struct clk *ahci_platform_find_clk(struct ahci_host_priv *hpriv, const char *con
        int i;
 
        for (i = 0; i < hpriv->n_clks; i++) {
-               if (!strcmp(hpriv->clks[i].id, con_id))
+               if (hpriv->clks[i].id && !strcmp(hpriv->clks[i].id, con_id))
                        return hpriv->clks[i].clk;
        }
 
index 2719638..f050189 100644 (file)
@@ -2056,6 +2056,11 @@ static int btusb_setup_csr(struct hci_dev *hdev)
 
        rp = (struct hci_rp_read_local_version *)skb->data;
 
+       bt_dev_info(hdev, "CSR: Setting up dongle with HCI ver=%u rev=%04x; LMP ver=%u subver=%04x; manufacturer=%u",
+               le16_to_cpu(rp->hci_ver), le16_to_cpu(rp->hci_rev),
+               le16_to_cpu(rp->lmp_ver), le16_to_cpu(rp->lmp_subver),
+               le16_to_cpu(rp->manufacturer));
+
        /* Detect a wide host of Chinese controllers that aren't CSR.
         *
         * Known fake bcdDevices: 0x0100, 0x0134, 0x1915, 0x2520, 0x7558, 0x8891
@@ -2118,6 +2123,7 @@ static int btusb_setup_csr(struct hci_dev *hdev)
                 * without these the controller will lock up.
                 */
                set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
+               set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
                set_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks);
                set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks);
 
index 14e6b3e..6f3ded6 100644 (file)
@@ -226,7 +226,10 @@ found:
                ioport_unmap(gp.pm);
                goto out;
        }
+       return 0;
+
 out:
+       pci_dev_put(pdev);
        return err;
 }
 
@@ -234,6 +237,7 @@ static void __exit amd_gpio_exit(void)
 {
        gpiochip_remove(&gp.chip);
        ioport_unmap(gp.pm);
+       pci_dev_put(gp.pdev);
 }
 
 module_init(amd_gpio_init);
index 870910b..200e43a 100644 (file)
@@ -610,6 +610,7 @@ static int rockchip_gpiolib_register(struct rockchip_pin_bank *bank)
                        return -ENODATA;
 
                pctldev = of_pinctrl_get(pctlnp);
+               of_node_put(pctlnp);
                if (!pctldev)
                        return -ENODEV;
 
index 4756ea0..a70522a 100644 (file)
@@ -526,12 +526,13 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
        if (ret)
                return ret;
 
+       /* From this point, the .release() function cleans up gpio_device */
+       gdev->dev.release = gpiodevice_release;
+
        ret = gpiochip_sysfs_register(gdev);
        if (ret)
                goto err_remove_device;
 
-       /* From this point, the .release() function cleans up gpio_device */
-       gdev->dev.release = gpiodevice_release;
        dev_dbg(&gdev->dev, "registered GPIOs %d to %d on %s\n", gdev->base,
                gdev->base + gdev->ngpio - 1, gdev->chip->label ? : "generic");
 
@@ -597,10 +598,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
        struct fwnode_handle *fwnode = NULL;
        struct gpio_device *gdev;
        unsigned long flags;
-       int base = gc->base;
        unsigned int i;
+       u32 ngpios = 0;
+       int base = 0;
        int ret = 0;
-       u32 ngpios;
 
        if (gc->fwnode)
                fwnode = gc->fwnode;
@@ -647,17 +648,12 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
        else
                gdev->owner = THIS_MODULE;
 
-       gdev->descs = kcalloc(gc->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
-       if (!gdev->descs) {
-               ret = -ENOMEM;
-               goto err_free_dev_name;
-       }
-
        /*
         * Try the device properties if the driver didn't supply the number
         * of GPIO lines.
         */
-       if (gc->ngpio == 0) {
+       ngpios = gc->ngpio;
+       if (ngpios == 0) {
                ret = device_property_read_u32(&gdev->dev, "ngpios", &ngpios);
                if (ret == -ENODATA)
                        /*
@@ -668,7 +664,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
                         */
                        ngpios = 0;
                else if (ret)
-                       goto err_free_descs;
+                       goto err_free_dev_name;
 
                gc->ngpio = ngpios;
        }
@@ -676,13 +672,19 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
        if (gc->ngpio == 0) {
                chip_err(gc, "tried to insert a GPIO chip with zero lines\n");
                ret = -EINVAL;
-               goto err_free_descs;
+               goto err_free_dev_name;
        }
 
        if (gc->ngpio > FASTPATH_NGPIO)
                chip_warn(gc, "line cnt %u is greater than fast path cnt %u\n",
                          gc->ngpio, FASTPATH_NGPIO);
 
+       gdev->descs = kcalloc(gc->ngpio, sizeof(*gdev->descs), GFP_KERNEL);
+       if (!gdev->descs) {
+               ret = -ENOMEM;
+               goto err_free_dev_name;
+       }
+
        gdev->label = kstrdup_const(gc->label ?: "unknown", GFP_KERNEL);
        if (!gdev->label) {
                ret = -ENOMEM;
@@ -701,11 +703,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
         * it may be a pipe dream. It will not happen before we get rid
         * of the sysfs interface anyways.
         */
+       base = gc->base;
        if (base < 0) {
                base = gpiochip_find_base(gc->ngpio);
                if (base < 0) {
-                       ret = base;
                        spin_unlock_irqrestore(&gpio_lock, flags);
+                       ret = base;
+                       base = 0;
                        goto err_free_label;
                }
                /*
@@ -816,6 +820,11 @@ err_remove_of_chip:
 err_free_gpiochip_mask:
        gpiochip_remove_pin_ranges(gc);
        gpiochip_free_valid_mask(gc);
+       if (gdev->dev.release) {
+               /* release() has been registered by gpiochip_setup_dev() */
+               put_device(&gdev->dev);
+               goto err_print_message;
+       }
 err_remove_from_list:
        spin_lock_irqsave(&gpio_lock, flags);
        list_del(&gdev->list);
@@ -829,13 +838,14 @@ err_free_dev_name:
 err_free_ida:
        ida_free(&gpio_ida, gdev->id);
 err_free_gdev:
+       kfree(gdev);
+err_print_message:
        /* failures here can mean systems won't boot... */
        if (ret != -EPROBE_DEFER) {
                pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
-                      gdev->base, gdev->base + gdev->ngpio - 1,
+                      base, base + (int)ngpios - 1,
                       gc->label ? : "generic", ret);
        }
-       kfree(gdev);
        return ret;
 }
 EXPORT_SYMBOL_GPL(gpiochip_add_data_with_key);
index 1122bd4..4d780e4 100644 (file)
@@ -907,13 +907,13 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
 
 
 /**
- * sdma_v4_0_gfx_stop - stop the gfx async dma engines
+ * sdma_v4_0_gfx_enable - enable the gfx async dma engines
  *
  * @adev: amdgpu_device pointer
- *
- * Stop the gfx async dma ring buffers (VEGA10).
+ * @enable: enable SDMA RB/IB
+ * control the gfx async dma ring buffers (VEGA10).
  */
-static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
+static void sdma_v4_0_gfx_enable(struct amdgpu_device *adev, bool enable)
 {
        u32 rb_cntl, ib_cntl;
        int i;
@@ -922,10 +922,10 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
-               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
+               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, enable ? 1 : 0);
                WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
                ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
-               ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
+               ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, enable ? 1 : 0);
                WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
        }
 }
@@ -1044,7 +1044,7 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
        int i;
 
        if (!enable) {
-               sdma_v4_0_gfx_stop(adev);
+               sdma_v4_0_gfx_enable(adev, enable);
                sdma_v4_0_rlc_stop(adev);
                if (adev->sdma.has_page_queue)
                        sdma_v4_0_page_stop(adev);
@@ -1960,8 +1960,10 @@ static int sdma_v4_0_suspend(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* SMU saves SDMA state for us */
-       if (adev->in_s0ix)
+       if (adev->in_s0ix) {
+               sdma_v4_0_gfx_enable(adev, false);
                return 0;
+       }
 
        return sdma_v4_0_hw_fini(adev);
 }
@@ -1971,8 +1973,12 @@ static int sdma_v4_0_resume(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* SMU restores SDMA state for us */
-       if (adev->in_s0ix)
+       if (adev->in_s0ix) {
+               sdma_v4_0_enable(adev, true);
+               sdma_v4_0_gfx_enable(adev, true);
+               amdgpu_ttm_set_buffer_funcs_status(adev, true);
                return 0;
+       }
 
        return sdma_v4_0_hw_init(adev);
 }
index 630f339..a0207a8 100644 (file)
@@ -1153,7 +1153,7 @@ struct vba_vars_st {
        double UrgBurstFactorLumaPre[DC__NUM_DPP__MAX];
        double UrgBurstFactorChromaPre[DC__NUM_DPP__MAX];
        bool NotUrgentLatencyHidingPre[DC__NUM_DPP__MAX];
-       bool LinkCapacitySupport[DC__NUM_DPP__MAX];
+       bool LinkCapacitySupport[DC__VOLTAGE_STATES];
        bool VREADY_AT_OR_AFTER_VSYNC[DC__NUM_DPP__MAX];
        unsigned int MIN_DST_Y_NEXT_START[DC__NUM_DPP__MAX];
        unsigned int VFrontPorch[DC__NUM_DPP__MAX];
index 40d8ca3..aa51c61 100644 (file)
@@ -2720,6 +2720,9 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
         * if supported. In any case the default RGB888 format is added
         */
 
+       /* Default 8bit RGB fallback */
+       output_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+
        if (max_bpc >= 16 && info->bpc == 16) {
                if (info->color_formats & DRM_COLOR_FORMAT_YCBCR444)
                        output_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48;
@@ -2753,9 +2756,6 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
        if (info->color_formats & DRM_COLOR_FORMAT_YCBCR444)
                output_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
 
-       /* Default 8bit RGB fallback */
-       output_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
-
        *num_output_fmts = i;
 
        return output_fmts;
index 3c35619..eb24322 100644 (file)
@@ -931,9 +931,9 @@ static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata)
                &pdata->bridge.encoder->crtc->state->adjusted_mode;
        u8 hsync_polarity = 0, vsync_polarity = 0;
 
-       if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
                hsync_polarity = CHA_HSYNC_POLARITY;
-       if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
                vsync_polarity = CHA_VSYNC_POLARITY;
 
        ti_sn65dsi86_write_u16(pdata, SN_CHA_ACTIVE_LINE_LENGTH_LOW_REG,
index 35138f8..b602cd7 100644 (file)
@@ -571,12 +571,20 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
 {
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
-       int ret;
 
        WARN_ON(shmem->base.import_attach);
 
-       ret = drm_gem_shmem_get_pages(shmem);
-       WARN_ON_ONCE(ret != 0);
+       mutex_lock(&shmem->pages_lock);
+
+       /*
+        * We should have already pinned the pages when the buffer was first
+        * mmap'd, vm_open() just grabs an additional reference for the new
+        * mm the vma is getting copied into (ie. on fork()).
+        */
+       if (!WARN_ON_ONCE(!shmem->pages_use_count))
+               shmem->pages_use_count++;
+
+       mutex_unlock(&shmem->pages_lock);
 
        drm_gem_vm_open(vma);
 }
@@ -622,10 +630,8 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
        }
 
        ret = drm_gem_shmem_get_pages(shmem);
-       if (ret) {
-               drm_gem_vm_close(vma);
+       if (ret)
                return ret;
-       }
 
        vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
index 089046f..50fa3df 100644 (file)
@@ -1085,21 +1085,21 @@ int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
        reset_ppn_array(pdesc->strsPPNs, ARRAY_SIZE(pdesc->strsPPNs));
 
        /* Pin mksGuestStat user pages and store those in the instance descriptor */
-       nr_pinned_stat = pin_user_pages(arg->stat, num_pages_stat, FOLL_LONGTERM, pages_stat, NULL);
+       nr_pinned_stat = pin_user_pages_fast(arg->stat, num_pages_stat, FOLL_LONGTERM, pages_stat);
        if (num_pages_stat != nr_pinned_stat)
                goto err_pin_stat;
 
        for (i = 0; i < num_pages_stat; ++i)
                pdesc->statPPNs[i] = page_to_pfn(pages_stat[i]);
 
-       nr_pinned_info = pin_user_pages(arg->info, num_pages_info, FOLL_LONGTERM, pages_info, NULL);
+       nr_pinned_info = pin_user_pages_fast(arg->info, num_pages_info, FOLL_LONGTERM, pages_info);
        if (num_pages_info != nr_pinned_info)
                goto err_pin_info;
 
        for (i = 0; i < num_pages_info; ++i)
                pdesc->infoPPNs[i] = page_to_pfn(pages_info[i]);
 
-       nr_pinned_strs = pin_user_pages(arg->strs, num_pages_strs, FOLL_LONGTERM, pages_strs, NULL);
+       nr_pinned_strs = pin_user_pages_fast(arg->strs, num_pages_strs, FOLL_LONGTERM, pages_strs);
        if (num_pages_strs != nr_pinned_strs)
                goto err_pin_strs;
 
index ecd3c2f..9c79873 100644 (file)
@@ -949,6 +949,10 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
        struct drm_device *dev = &dev_priv->drm;
        int i, ret;
 
+       /* Screen objects won't work if GMR's aren't available */
+       if (!dev_priv->has_gmr)
+               return -ENOSYS;
+
        if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
                return -ENOSYS;
        }
index 9c1d31f..bd47628 100644 (file)
@@ -1315,6 +1315,9 @@ static s32 snto32(__u32 value, unsigned n)
        if (!value || !n)
                return 0;
 
+       if (n > 32)
+               n = 32;
+
        switch (n) {
        case 8:  return ((__s8)value);
        case 16: return ((__s16)value);
index dad953f..8f58c3c 100644 (file)
 #define USB_DEVICE_ID_CH_AXIS_295      0x001c
 
 #define USB_VENDOR_ID_CHERRY           0x046a
+#define USB_DEVICE_ID_CHERRY_MOUSE_000C        0x000c
 #define USB_DEVICE_ID_CHERRY_CYMOTION  0x0023
 #define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR    0x0027
 
 #define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd
 #define USB_DEVICE_ID_MS_PIXART_MOUSE    0x00cb
 #define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS      0x02e0
+#define USB_DEVICE_ID_MS_MOUSE_0783      0x0783
 
 #define USB_VENDOR_ID_MOJO             0x8282
 #define USB_DEVICE_ID_RETRO_ADAPTER    0x3201
 #define USB_DEVICE_ID_SYNAPTICS_DELL_K15A      0x6e21
 #define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002 0x73f4
 #define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5
+#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_017       0x73f6
 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5   0x81a7
 
 #define USB_VENDOR_ID_TEXAS_INSTRUMENTS        0x2047
 
 #define USB_VENDOR_ID_PRIMAX   0x0461
 #define USB_DEVICE_ID_PRIMAX_MOUSE_4D22        0x4d22
+#define USB_DEVICE_ID_PRIMAX_MOUSE_4E2A        0x4e2a
 #define USB_DEVICE_ID_PRIMAX_KEYBOARD  0x4e05
 #define USB_DEVICE_ID_PRIMAX_REZEL     0x4e72
 #define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F 0x4d0f
index 430fa4f..75ebfcf 100644 (file)
@@ -121,6 +121,11 @@ static const struct hid_device_id ite_devices[] = {
                     USB_VENDOR_ID_SYNAPTICS,
                     USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003),
          .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
+       /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_SYNAPTICS,
+                    USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_017),
+         .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
        { }
 };
 MODULE_DEVICE_TABLE(hid, ite_devices);
index 5e6a0ce..e3fcf13 100644 (file)
@@ -872,6 +872,12 @@ static ssize_t lg4ff_alternate_modes_store(struct device *dev, struct device_att
                return -ENOMEM;
 
        i = strlen(lbuf);
+
+       if (i == 0) {
+               kfree(lbuf);
+               return -EINVAL;
+       }
+
        if (lbuf[i-1] == '\n') {
                if (i == 1) {
                        kfree(lbuf);
index 71a9c25..8a2aac1 100644 (file)
@@ -4269,21 +4269,6 @@ static void hidpp_remove(struct hid_device *hdev)
        mutex_destroy(&hidpp->send_mutex);
 }
 
-static const struct hid_device_id unhandled_hidpp_devices[] = {
-       /* Logitech Harmony Adapter for PS3, handled in hid-sony */
-       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
-       /* Handled in hid-generic */
-       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD) },
-       {}
-};
-
-static bool hidpp_match(struct hid_device *hdev,
-                       bool ignore_special_driver)
-{
-       /* Refuse to handle devices handled by other HID drivers */
-       return !hid_match_id(hdev, unhandled_hidpp_devices);
-}
-
 #define LDJ_DEVICE(product) \
        HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, \
                   USB_VENDOR_ID_LOGITECH, (product))
@@ -4367,9 +4352,15 @@ static const struct hid_device_id hidpp_devices[] = {
        { /* MX5500 keyboard over Bluetooth */
          HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
          .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
-
-       { /* And try to enable HID++ for all the Logitech Bluetooth devices */
-         HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_ANY, USB_VENDOR_ID_LOGITECH, HID_ANY_ID) },
+       { /* M-RCQ142 V470 Cordless Laser Mouse over Bluetooth */
+         HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb008) },
+       { /* MX Master mouse over Bluetooth */
+         HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012) },
+       { /* MX Ergo trackball over Bluetooth */
+         HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e) },
+       { /* MX Master 3 mouse over Bluetooth */
+         HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023) },
        {}
 };
 
@@ -4383,7 +4374,6 @@ static const struct hid_usage_id hidpp_usages[] = {
 static struct hid_driver hidpp_driver = {
        .name = "logitech-hidpp-device",
        .id_table = hidpp_devices,
-       .match = hidpp_match,
        .report_fixup = hidpp_report_fixup,
        .probe = hidpp_probe,
        .remove = hidpp_remove,
index 50e1c71..0e9702c 100644 (file)
@@ -54,6 +54,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET },
        { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET },
        { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_MOUSE_000C), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS },
@@ -122,6 +123,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_MOUSE_0783), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE3_COVER), HID_QUIRK_NO_INIT_REPORTS },
@@ -146,6 +148,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL },
+       { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4E2A), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D65), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22), HID_QUIRK_ALWAYS_POLL },
index 0fbc408..7fa6fe0 100644 (file)
@@ -192,6 +192,7 @@ static int uclogic_probe(struct hid_device *hdev,
         * than the pen, so use QUIRK_MULTI_INPUT for all tablets.
         */
        hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+       hdev->quirks |= HID_QUIRK_HIDINPUT_FORCE;
 
        /* Allocate and assign driver data */
        drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL);
index 4bd54c4..6b73eb0 100644 (file)
@@ -1193,7 +1193,7 @@ __u8 *uclogic_rdesc_template_apply(const __u8 *template_ptr,
                           p[sizeof(btn_head)] < param_num) {
                        v = param_list[p[sizeof(btn_head)]];
                        put_unaligned((__u8)0x2A, p); /* Usage Maximum */
-                       put_unaligned_le16((__force u16)cpu_to_le16(v), p + 1);
+                       put_unaligned((__force u16)cpu_to_le16(v), (s16 *)(p + 1));
                        p += sizeof(btn_head) + 1;
                } else {
                        p++;
index 5273ee2..d65abe6 100644 (file)
@@ -66,6 +66,6 @@ endmenu
 
 config I2C_HID_CORE
        tristate
-       default y if I2C_HID_ACPI=y || I2C_HID_OF=y || I2C_HID_OF_GOODIX=y
-       default m if I2C_HID_ACPI=m || I2C_HID_OF=m || I2C_HID_OF_GOODIX=m
+       default y if I2C_HID_ACPI=y || I2C_HID_OF=y || I2C_HID_OF_ELAN=y || I2C_HID_OF_GOODIX=y
+       default m if I2C_HID_ACPI=m || I2C_HID_OF=m || I2C_HID_OF_ELAN=m || I2C_HID_OF_GOODIX=m
        select HID
index ab9697f..92efc46 100644 (file)
@@ -813,7 +813,13 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
        num_buffers = max_t(unsigned int, *count, q->min_buffers_needed);
        num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME);
        memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
+       /*
+        * Set this now to ensure that drivers see the correct q->memory value
+        * in the queue_setup op.
+        */
+       mutex_lock(&q->mmap_lock);
        q->memory = memory;
+       mutex_unlock(&q->mmap_lock);
        set_queue_coherency(q, non_coherent_mem);
 
        /*
@@ -823,22 +829,27 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
        ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
                       plane_sizes, q->alloc_devs);
        if (ret)
-               return ret;
+               goto error;
 
        /* Check that driver has set sane values */
-       if (WARN_ON(!num_planes))
-               return -EINVAL;
+       if (WARN_ON(!num_planes)) {
+               ret = -EINVAL;
+               goto error;
+       }
 
        for (i = 0; i < num_planes; i++)
-               if (WARN_ON(!plane_sizes[i]))
-                       return -EINVAL;
+               if (WARN_ON(!plane_sizes[i])) {
+                       ret = -EINVAL;
+                       goto error;
+               }
 
        /* Finally, allocate buffers and video memory */
        allocated_buffers =
                __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes);
        if (allocated_buffers == 0) {
                dprintk(q, 1, "memory allocation failed\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto error;
        }
 
        /*
@@ -879,7 +890,8 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
        if (ret < 0) {
                /*
                 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
-                * from q->num_buffers.
+                * from q->num_buffers and it will reset q->memory to
+                * VB2_MEMORY_UNKNOWN.
                 */
                __vb2_queue_free(q, allocated_buffers);
                mutex_unlock(&q->mmap_lock);
@@ -895,6 +907,12 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
        q->waiting_for_buffers = !q->is_output;
 
        return 0;
+
+error:
+       mutex_lock(&q->mmap_lock);
+       q->memory = VB2_MEMORY_UNKNOWN;
+       mutex_unlock(&q->mmap_lock);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
 
@@ -906,6 +924,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
        unsigned int num_planes = 0, num_buffers, allocated_buffers;
        unsigned plane_sizes[VB2_MAX_PLANES] = { };
        bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
+       bool no_previous_buffers = !q->num_buffers;
        int ret;
 
        if (q->num_buffers == VB2_MAX_FRAME) {
@@ -913,13 +932,19 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
                return -ENOBUFS;
        }
 
-       if (!q->num_buffers) {
+       if (no_previous_buffers) {
                if (q->waiting_in_dqbuf && *count) {
                        dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
                        return -EBUSY;
                }
                memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
+               /*
+                * Set this now to ensure that drivers see the correct q->memory
+                * value in the queue_setup op.
+                */
+               mutex_lock(&q->mmap_lock);
                q->memory = memory;
+               mutex_unlock(&q->mmap_lock);
                q->waiting_for_buffers = !q->is_output;
                set_queue_coherency(q, non_coherent_mem);
        } else {
@@ -945,14 +970,15 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
        ret = call_qop(q, queue_setup, q, &num_buffers,
                       &num_planes, plane_sizes, q->alloc_devs);
        if (ret)
-               return ret;
+               goto error;
 
        /* Finally, allocate buffers and video memory */
        allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
                                num_planes, plane_sizes);
        if (allocated_buffers == 0) {
                dprintk(q, 1, "memory allocation failed\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto error;
        }
 
        /*
@@ -983,7 +1009,8 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
        if (ret < 0) {
                /*
                 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
-                * from q->num_buffers.
+                * from q->num_buffers and it will reset q->memory to
+                * VB2_MEMORY_UNKNOWN.
                 */
                __vb2_queue_free(q, allocated_buffers);
                mutex_unlock(&q->mmap_lock);
@@ -998,6 +1025,14 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
        *count = allocated_buffers;
 
        return 0;
+
+error:
+       if (no_previous_buffers) {
+               mutex_lock(&q->mmap_lock);
+               q->memory = VB2_MEMORY_UNKNOWN;
+               mutex_unlock(&q->mmap_lock);
+       }
+       return ret;
 }
 EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
 
@@ -2165,6 +2200,22 @@ static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
        unsigned int buffer, plane;
 
        /*
+        * Sanity checks to ensure the lock is held, MEMORY_MMAP is
+        * used and fileio isn't active.
+        */
+       lockdep_assert_held(&q->mmap_lock);
+
+       if (q->memory != VB2_MEMORY_MMAP) {
+               dprintk(q, 1, "queue is not currently set up for mmap\n");
+               return -EINVAL;
+       }
+
+       if (vb2_fileio_is_active(q)) {
+               dprintk(q, 1, "file io in progress\n");
+               return -EBUSY;
+       }
+
+       /*
         * Go over all buffers and their planes, comparing the given offset
         * with an offset assigned to each plane. If a match is found,
         * return its buffer and plane numbers.
@@ -2265,11 +2316,6 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
        int ret;
        unsigned long length;
 
-       if (q->memory != VB2_MEMORY_MMAP) {
-               dprintk(q, 1, "queue is not currently set up for mmap\n");
-               return -EINVAL;
-       }
-
        /*
         * Check memory area access mode.
         */
@@ -2291,14 +2337,9 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
 
        mutex_lock(&q->mmap_lock);
 
-       if (vb2_fileio_is_active(q)) {
-               dprintk(q, 1, "mmap: file io in progress\n");
-               ret = -EBUSY;
-               goto unlock;
-       }
-
        /*
-        * Find the plane corresponding to the offset passed by userspace.
+        * Find the plane corresponding to the offset passed by userspace. This
+        * will return an error if not MEMORY_MMAP or file I/O is in progress.
         */
        ret = __find_plane_by_offset(q, off, &buffer, &plane);
        if (ret)
@@ -2351,22 +2392,25 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
        void *vaddr;
        int ret;
 
-       if (q->memory != VB2_MEMORY_MMAP) {
-               dprintk(q, 1, "queue is not currently set up for mmap\n");
-               return -EINVAL;
-       }
+       mutex_lock(&q->mmap_lock);
 
        /*
-        * Find the plane corresponding to the offset passed by userspace.
+        * Find the plane corresponding to the offset passed by userspace. This
+        * will return an error if not MEMORY_MMAP or file I/O is in progress.
         */
        ret = __find_plane_by_offset(q, off, &buffer, &plane);
        if (ret)
-               return ret;
+               goto unlock;
 
        vb = q->bufs[buffer];
 
        vaddr = vb2_plane_vaddr(vb, plane);
+       mutex_unlock(&q->mmap_lock);
        return vaddr ? (unsigned long)vaddr : -EINVAL;
+
+unlock:
+       mutex_unlock(&q->mmap_lock);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
 #endif
index 003c32f..942d000 100644 (file)
@@ -145,6 +145,8 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
        const struct v4l2_bt_timings *bt = &t->bt;
        const struct v4l2_bt_timings_cap *cap = &dvcap->bt;
        u32 caps = cap->capabilities;
+       const u32 max_vert = 10240;
+       u32 max_hor = 3 * bt->width;
 
        if (t->type != V4L2_DV_BT_656_1120)
                return false;
@@ -166,14 +168,20 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
        if (!bt->interlaced &&
            (bt->il_vbackporch || bt->il_vsync || bt->il_vfrontporch))
                return false;
-       if (bt->hfrontporch > 2 * bt->width ||
-           bt->hsync > 1024 || bt->hbackporch > 1024)
+       /*
+        * Some video receivers cannot properly separate the frontporch,
+        * backporch and sync values, and instead they only have the total
+        * blanking. That can be assigned to any of these three fields.
+        * So just check that none of these are way out of range.
+        */
+       if (bt->hfrontporch > max_hor ||
+           bt->hsync > max_hor || bt->hbackporch > max_hor)
                return false;
-       if (bt->vfrontporch > 4096 ||
-           bt->vsync > 128 || bt->vbackporch > 4096)
+       if (bt->vfrontporch > max_vert ||
+           bt->vsync > max_vert || bt->vbackporch > max_vert)
                return false;
-       if (bt->interlaced && (bt->il_vfrontporch > 4096 ||
-           bt->il_vsync > 128 || bt->il_vbackporch > 4096))
+       if (bt->interlaced && (bt->il_vfrontporch > max_vert ||
+           bt->il_vsync > max_vert || bt->il_vbackporch > max_vert))
                return false;
        return fnc == NULL || fnc(t, fnc_handle);
 }
index f298b9b..b9a882f 100644 (file)
@@ -3247,7 +3247,7 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
                goto out;
 
        saddr = &combined->ip6.saddr;
-       daddr = &combined->ip6.saddr;
+       daddr = &combined->ip6.daddr;
 
        slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n",
                  __func__, slave->dev->name, bond_slave_state(slave),
index ed3d0b8..dc7192e 100644 (file)
@@ -796,9 +796,9 @@ static int can327_netdev_close(struct net_device *dev)
 
        netif_stop_queue(dev);
 
-       /* Give UART one final chance to flush. */
-       clear_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags);
-       flush_work(&elm->tx_work);
+       /* We don't flush the UART TX queue here, as we want final stop
+        * commands (like the above dummy char) to be flushed out.
+        */
 
        can_rx_offload_disable(&elm->offload);
        elm->can.state = CAN_STATE_STOPPED;
@@ -1069,12 +1069,15 @@ static void can327_ldisc_close(struct tty_struct *tty)
 {
        struct can327 *elm = (struct can327 *)tty->disc_data;
 
-       /* unregister_netdev() calls .ndo_stop() so we don't have to.
-        * Our .ndo_stop() also flushes the TTY write wakeup handler,
-        * so we can safely set elm->tty = NULL after this.
-        */
+       /* unregister_netdev() calls .ndo_stop() so we don't have to. */
        unregister_candev(elm->dev);
 
+       /* Give UART one final chance to flush.
+        * No need to clear TTY_DO_WRITE_WAKEUP since .write_wakeup() is
+        * serialised against .close() and will not be called once we return.
+        */
+       flush_work(&elm->tx_work);
+
        /* Mark channel as dead */
        spin_lock_bh(&elm->lock);
        tty->disc_data = NULL;
index fbb3413..f4db770 100644 (file)
@@ -864,12 +864,14 @@ static void slcan_close(struct tty_struct *tty)
 {
        struct slcan *sl = (struct slcan *)tty->disc_data;
 
-       /* unregister_netdev() calls .ndo_stop() so we don't have to.
-        * Our .ndo_stop() also flushes the TTY write wakeup handler,
-        * so we can safely set sl->tty = NULL after this.
-        */
        unregister_candev(sl->dev);
 
+       /*
+        * The netdev needn't be UP (so .ndo_stop() is not called). Hence make
+        * sure this is not running before freeing it up.
+        */
+       flush_work(&sl->tx_work);
+
        /* Mark channel as dead */
        spin_lock_bh(&sl->lock);
        tty->disc_data = NULL;
index 81b88e9..42323f5 100644 (file)
@@ -234,6 +234,10 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
                u8 rxerr = msg->msg.rx.data[2];
                u8 txerr = msg->msg.rx.data[3];
 
+               netdev_dbg(priv->netdev,
+                          "CAN_ERR_EV_EXT: dlc=%#02x state=%02x ecc=%02x rec=%02x tec=%02x\n",
+                          msg->msg.rx.dlc, state, ecc, rxerr, txerr);
+
                skb = alloc_can_err_skb(priv->netdev, &cf);
                if (skb == NULL) {
                        stats->rx_dropped++;
@@ -260,6 +264,8 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
                                break;
                        default:
                                priv->can.state = CAN_STATE_ERROR_ACTIVE;
+                               txerr = 0;
+                               rxerr = 0;
                                break;
                        }
                } else {
index 2479be3..937cb22 100644 (file)
@@ -833,10 +833,13 @@ static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port,
 
        chip->info->ops->phylink_get_caps(chip, port, config);
 
-       /* Internal ports need GMII for PHYLIB */
-       if (mv88e6xxx_phy_is_internal(ds, port))
+       if (mv88e6xxx_phy_is_internal(ds, port)) {
+               __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+                         config->supported_interfaces);
+               /* Internal ports with no phy-mode need GMII for PHYLIB */
                __set_bit(PHY_INTERFACE_MODE_GMII,
                          config->supported_interfaces);
+       }
 }
 
 static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
index 10c6fea..bdbbff2 100644 (file)
@@ -95,6 +95,8 @@ static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
                if (IS_ERR(region)) {
                        while (--i >= 0)
                                dsa_devlink_region_destroy(priv->regions[i]);
+
+                       kfree(priv->regions);
                        return PTR_ERR(region);
                }
 
index 4126661..b70dcf3 100644 (file)
@@ -1038,7 +1038,7 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv)
 
                policing[bcast].sharindx = port;
                /* Only SJA1110 has multicast policers */
-               if (mcast <= table->ops->max_entry_count)
+               if (mcast < table->ops->max_entry_count)
                        policing[mcast].sharindx = port;
        }
 
index e104fb0..aa0d2f3 100644 (file)
@@ -258,6 +258,7 @@ static int greth_init_rings(struct greth_private *greth)
                        if (dma_mapping_error(greth->dev, dma_addr)) {
                                if (netif_msg_ifup(greth))
                                        dev_err(greth->dev, "Could not create initial DMA mapping\n");
+                               dev_kfree_skb(skb);
                                goto cleanup;
                        }
                        greth->rx_skbuff[i] = skb;
index 55dfdb3..f4ca0c6 100644 (file)
@@ -71,13 +71,14 @@ config BCM63XX_ENET
 config BCMGENET
        tristate "Broadcom GENET internal MAC support"
        depends on HAS_IOMEM
+       depends on PTP_1588_CLOCK_OPTIONAL || !ARCH_BCM2835
        select MII
        select PHYLIB
        select FIXED_PHY
        select BCM7XXX_PHY
        select MDIO_BCM_UNIMAC
        select DIMLIB
-       select BROADCOM_PHY if (ARCH_BCM2835 && PTP_1588_CLOCK_OPTIONAL)
+       select BROADCOM_PHY if ARCH_BCM2835
        help
          This driver supports the built-in Ethernet MACs found in the
          Broadcom BCM7xxx Set Top Box family chipset.
index 98f3dc4..f2f9549 100644 (file)
@@ -2239,7 +2239,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        err = register_netdev(netdev);
        if (err) {
                dev_err(dev, "Failed to register netdevice\n");
-               goto err_unregister_interrupts;
+               goto err_destroy_workqueue;
        }
 
        nic->msg_enable = debug;
@@ -2248,6 +2248,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        return 0;
 
+err_destroy_workqueue:
+       destroy_workqueue(nic->nicvf_rx_mode_wq);
 err_unregister_interrupts:
        nicvf_unregister_interrupts(nic);
 err_free_netdev:
index cacd454..c39b866 100644 (file)
@@ -132,6 +132,7 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
                                                 DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
                dev_err(dev, "DMA mapping failed\n");
+               kfree(cmd_buff);
                return -EFAULT;
        }
 
@@ -142,6 +143,7 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
                         DMA_TO_DEVICE);
        if (err) {
                dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
+               kfree(cmd_buff);
                return err;
        }
 
@@ -172,6 +174,7 @@ dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
                                                 DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
                dev_err(dev, "DMA mapping failed\n");
+               kfree(cmd_buff);
                return -EFAULT;
        }
 
@@ -182,6 +185,7 @@ dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
                         DMA_TO_DEVICE);
        if (err) {
                dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
+               kfree(cmd_buff);
                return err;
        }
 
index 2ca2b61..23e1a94 100644 (file)
@@ -1220,7 +1220,8 @@ fec_restart(struct net_device *ndev)
                writel(0, fep->hwp + FEC_IMASK);
 
        /* Init the interrupt coalescing */
-       fec_enet_itr_coal_set(ndev);
+       if (fep->quirks & FEC_QUIRK_HAS_COALESCE)
+               fec_enet_itr_coal_set(ndev);
 }
 
 static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
index 93846ba..ce2571c 100644 (file)
@@ -283,7 +283,7 @@ static int hisi_femac_rx(struct net_device *dev, int limit)
                skb->protocol = eth_type_trans(skb, dev);
                napi_gro_receive(&priv->napi, skb);
                dev->stats.rx_packets++;
-               dev->stats.rx_bytes += skb->len;
+               dev->stats.rx_bytes += len;
 next:
                pos = (pos + 1) % rxq->num;
                if (rx_pkts_num >= limit)
index ffcf797..f867e95 100644 (file)
@@ -550,7 +550,7 @@ static int hix5hd2_rx(struct net_device *dev, int limit)
                skb->protocol = eth_type_trans(skb, dev);
                napi_gro_receive(&priv->napi, skb);
                dev->stats.rx_packets++;
-               dev->stats.rx_bytes += skb->len;
+               dev->stats.rx_bytes += len;
 next:
                pos = dma_ring_incr(pos, RX_DESC_NUM);
        }
index 49e9269..55cf2f6 100644 (file)
@@ -5936,9 +5936,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                e1000_tx_queue(tx_ring, tx_flags, count);
                /* Make sure there is space in the ring for the next send. */
                e1000_maybe_stop_tx(tx_ring,
-                                   (MAX_SKB_FRAGS *
+                                   ((MAX_SKB_FRAGS + 1) *
                                     DIV_ROUND_UP(PAGE_SIZE,
-                                                 adapter->tx_fifo_limit) + 2));
+                                                 adapter->tx_fifo_limit) + 4));
 
                if (!netdev_xmit_more() ||
                    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
index 4a6a6e4..f6fa63e 100644 (file)
@@ -4464,11 +4464,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
                        return -EOPNOTSUPP;
 
                /* First 4 bytes of L4 header */
-               if (usr_ip4_spec->l4_4_bytes == htonl(0xFFFFFFFF))
-                       new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK;
-               else if (!usr_ip4_spec->l4_4_bytes)
-                       new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
-               else
+               if (usr_ip4_spec->l4_4_bytes)
                        return -EOPNOTSUPP;
 
                /* Filtering on Type of Service is not supported. */
@@ -4507,11 +4503,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
                else
                        return -EOPNOTSUPP;
 
-               if (usr_ip6_spec->l4_4_bytes == htonl(0xFFFFFFFF))
-                       new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK;
-               else if (!usr_ip6_spec->l4_4_bytes)
-                       new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
-               else
+               if (usr_ip6_spec->l4_4_bytes)
                        return -EOPNOTSUPP;
 
                /* Filtering on Traffic class is not supported. */
index b3cb587..6416322 100644 (file)
@@ -10655,6 +10655,21 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
 }
 
 /**
+ * i40e_clean_xps_state - clean xps state for every tx_ring
+ * @vsi: ptr to the VSI
+ **/
+static void i40e_clean_xps_state(struct i40e_vsi *vsi)
+{
+       int i;
+
+       if (vsi->tx_rings)
+               for (i = 0; i < vsi->num_queue_pairs; i++)
+                       if (vsi->tx_rings[i])
+                               clear_bit(__I40E_TX_XPS_INIT_DONE,
+                                         vsi->tx_rings[i]->state);
+}
+
+/**
  * i40e_prep_for_reset - prep for the core to reset
  * @pf: board private structure
  *
@@ -10678,8 +10693,10 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
        i40e_pf_quiesce_all_vsi(pf);
 
        for (v = 0; v < pf->num_alloc_vsi; v++) {
-               if (pf->vsi[v])
+               if (pf->vsi[v]) {
+                       i40e_clean_xps_state(pf->vsi[v]);
                        pf->vsi[v]->seid = 0;
+               }
        }
 
        i40e_shutdown_adminq(&pf->hw);
index 72ddcef..635f93d 100644 (file)
@@ -1578,6 +1578,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
        i40e_cleanup_reset_vf(vf);
 
        i40e_flush(hw);
+       usleep_range(20000, 40000);
        clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
 
        return true;
@@ -1701,6 +1702,7 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
        }
 
        i40e_flush(hw);
+       usleep_range(20000, 40000);
        clear_bit(__I40E_VF_DISABLE, pf->state);
 
        return true;
index e5f3e76..ff911af 100644 (file)
@@ -1413,6 +1413,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
                        *data = 1;
                        return -1;
                }
+               wr32(E1000_IVAR_MISC, E1000_IVAR_VALID << 8);
+               wr32(E1000_EIMS, BIT(0));
        } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
                shared_int = false;
                if (request_irq(irq,
index ff3e361..5aefaaf 100644 (file)
@@ -4271,7 +4271,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
        /* Use the cpu associated to the rxq when it is online, in all
         * the other cases, use the cpu 0 which can't be offline.
         */
-       if (cpu_online(pp->rxq_def))
+       if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def))
                elected_cpu = pp->rxq_def;
 
        max_cpu = num_present_cpus();
index e64318c..6a01ab1 100644 (file)
@@ -1134,7 +1134,12 @@ int otx2_init_tc(struct otx2_nic *nic)
                return err;
 
        tc->flow_ht_params = tc_flow_ht_params;
-       return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
+       err = rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
+       if (err) {
+               kfree(tc->tc_entries_bitmap);
+               tc->tc_entries_bitmap = NULL;
+       }
+       return err;
 }
 EXPORT_SYMBOL(otx2_init_tc);
 
index 81a8ccc..5693784 100644 (file)
@@ -359,7 +359,7 @@ static int regmap_encx24j600_phy_reg_read(void *context, unsigned int reg,
                goto err_out;
 
        usleep_range(26, 100);
-       while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat) != 0) &&
+       while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
               (mistat & BUSY))
                cpu_relax();
 
@@ -397,7 +397,7 @@ static int regmap_encx24j600_phy_reg_write(void *context, unsigned int reg,
                goto err_out;
 
        usleep_range(26, 100);
-       while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat) != 0) &&
+       while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
               (mistat & BUSY))
                cpu_relax();
 
index 66360c8..141897d 100644 (file)
@@ -317,7 +317,7 @@ int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
        next_dcb_hw = sparx5_fdma_next_dcb(tx, tx->curr_entry);
        db_hw = &next_dcb_hw->db[0];
        if (!(db_hw->status & FDMA_DCB_STATUS_DONE))
-               tx->dropped++;
+               return -EINVAL;
        db = list_first_entry(&tx->db_list, struct sparx5_db, list);
        list_move_tail(&db->list, &tx->db_list);
        next_dcb_hw->nextptr = FDMA_DCB_INVALID_DATA;
index eeac04b..b6bbb3c 100644 (file)
@@ -887,6 +887,8 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
 
 cleanup_ports:
        sparx5_cleanup_ports(sparx5);
+       if (sparx5->mact_queue)
+               destroy_workqueue(sparx5->mact_queue);
 cleanup_config:
        kfree(configs);
 cleanup_pnode:
@@ -911,6 +913,7 @@ static int mchp_sparx5_remove(struct platform_device *pdev)
        sparx5_cleanup_ports(sparx5);
        /* Unregister netdevs */
        sparx5_unregister_notifier_blocks(sparx5);
+       destroy_workqueue(sparx5->mact_queue);
 
        return 0;
 }
index 83c16ca..6db6ac6 100644 (file)
@@ -234,9 +234,8 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
        sparx5_set_port_ifh(ifh, port->portno);
 
        if (sparx5->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
-               ret = sparx5_ptp_txtstamp_request(port, skb);
-               if (ret)
-                       return ret;
+               if (sparx5_ptp_txtstamp_request(port, skb) < 0)
+                       return NETDEV_TX_BUSY;
 
                sparx5_set_port_ifh_rew_op(ifh, SPARX5_SKB_CB(skb)->rew_op);
                sparx5_set_port_ifh_pdu_type(ifh, SPARX5_SKB_CB(skb)->pdu_type);
@@ -250,23 +249,31 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
        else
                ret = sparx5_inject(sparx5, ifh, skb, dev);
 
-       if (ret == NETDEV_TX_OK) {
-               stats->tx_bytes += skb->len;
-               stats->tx_packets++;
+       if (ret == -EBUSY)
+               goto busy;
+       if (ret < 0)
+               goto drop;
 
-               if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
-                   SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
-                       return ret;
+       stats->tx_bytes += skb->len;
+       stats->tx_packets++;
+       sparx5->tx.packets++;
 
-               dev_kfree_skb_any(skb);
-       } else {
-               stats->tx_dropped++;
+       if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+           SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+               return NETDEV_TX_OK;
 
-               if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
-                   SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
-                       sparx5_ptp_txtstamp_release(port, skb);
-       }
-       return ret;
+       dev_consume_skb_any(skb);
+       return NETDEV_TX_OK;
+drop:
+       stats->tx_dropped++;
+       sparx5->tx.dropped++;
+       dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
+busy:
+       if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+           SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+               sparx5_ptp_txtstamp_release(port, skb);
+       return NETDEV_TX_BUSY;
 }
 
 static enum hrtimer_restart sparx5_injection_timeout(struct hrtimer *tmr)
index 4a6efe6..65c24ee 100644 (file)
@@ -498,7 +498,14 @@ enum {
 
 #define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)
 
-#define GDMA_DRV_CAP_FLAGS1 GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT
+/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
+ * so the driver is able to reliably support features like busy_poll.
+ */
+#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
+
+#define GDMA_DRV_CAP_FLAGS1 \
+       (GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
+        GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX)
 
 #define GDMA_DRV_CAP_FLAGS2 0
 
index 9259a74..27a0f3a 100644 (file)
@@ -1303,10 +1303,11 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
                xdp_do_flush();
 }
 
-static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
+static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
 {
        struct mana_cq *cq = context;
        u8 arm_bit;
+       int w;
 
        WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
 
@@ -1315,26 +1316,31 @@ static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
        else
                mana_poll_tx_cq(cq);
 
-       if (cq->work_done < cq->budget &&
-           napi_complete_done(&cq->napi, cq->work_done)) {
+       w = cq->work_done;
+
+       if (w < cq->budget &&
+           napi_complete_done(&cq->napi, w)) {
                arm_bit = SET_ARM_BIT;
        } else {
                arm_bit = 0;
        }
 
        mana_gd_ring_cq(gdma_queue, arm_bit);
+
+       return w;
 }
 
 static int mana_poll(struct napi_struct *napi, int budget)
 {
        struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
+       int w;
 
        cq->work_done = 0;
        cq->budget = budget;
 
-       mana_cq_handler(cq, cq->gdma_cq);
+       w = mana_cq_handler(cq, cq->gdma_cq);
 
-       return min(cq->work_done, budget);
+       return min(w, budget);
 }
 
 static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
index 2b427d8..ccacb6a 100644 (file)
@@ -282,7 +282,7 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
        dma_len = skb_headlen(skb);
        if (skb_is_gso(skb))
                type = NFDK_DESC_TX_TYPE_TSO;
-       else if (!nr_frags && dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+       else if (!nr_frags && dma_len <= NFDK_TX_MAX_DATA_PER_HEAD)
                type = NFDK_DESC_TX_TYPE_SIMPLE;
        else
                type = NFDK_DESC_TX_TYPE_GATHER;
@@ -927,7 +927,7 @@ nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
        dma_len = pkt_len;
        dma_addr = rxbuf->dma_addr + dma_off;
 
-       if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+       if (dma_len <= NFDK_TX_MAX_DATA_PER_HEAD)
                type = NFDK_DESC_TX_TYPE_SIMPLE;
        else
                type = NFDK_DESC_TX_TYPE_GATHER;
@@ -1325,7 +1325,7 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
        txbuf = &tx_ring->ktxbufs[wr_idx];
 
        dma_len = skb_headlen(skb);
-       if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+       if (dma_len <= NFDK_TX_MAX_DATA_PER_HEAD)
                type = NFDK_DESC_TX_TYPE_SIMPLE;
        else
                type = NFDK_DESC_TX_TYPE_GATHER;
index 6bc9233..33f723a 100644 (file)
@@ -841,7 +841,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
                                napi_gro_receive(&priv->napi[q],
                                                 priv->rx_1st_skb);
                                stats->rx_packets++;
-                               stats->rx_bytes += priv->rx_1st_skb->len;
+                               stats->rx_bytes += pkt_len;
                                break;
                        }
                }
index 50f6b4a..eb6d9cd 100644 (file)
@@ -108,10 +108,10 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
 
        axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
        axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
-       axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
-       axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
-       axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
-       axi->axi_rb =  of_property_read_bool(np, "snps,axi_rb");
+       axi->axi_kbbe = of_property_read_bool(np, "snps,kbbe");
+       axi->axi_fb = of_property_read_bool(np, "snps,fb");
+       axi->axi_mb = of_property_read_bool(np, "snps,mb");
+       axi->axi_rb =  of_property_read_bool(np, "snps,rb");
 
        if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
                axi->axi_wr_osr_lmt = 1;
index d04a239..b3b0ba8 100644 (file)
@@ -1454,7 +1454,7 @@ static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy
 
        if (speed == SPEED_1000)
                mac_control |= CPSW_SL_CTL_GIG;
-       if (speed == SPEED_10 && interface == PHY_INTERFACE_MODE_RGMII)
+       if (speed == SPEED_10 && phy_interface_mode_is_rgmii(interface))
                /* Can be used with in band mode only */
                mac_control |= CPSW_SL_CTL_EXT_EN;
        if (speed == SPEED_100 && interface == PHY_INTERFACE_MODE_RMII)
index 450b16a..e1a569b 100644 (file)
@@ -885,7 +885,7 @@ static int ca8210_spi_transfer(
 
        dev_dbg(&spi->dev, "%s called\n", __func__);
 
-       cas_ctl = kmalloc(sizeof(*cas_ctl), GFP_ATOMIC);
+       cas_ctl = kzalloc(sizeof(*cas_ctl), GFP_ATOMIC);
        if (!cas_ctl)
                return -ENOMEM;
 
index c69b87d..edc769d 100644 (file)
@@ -970,7 +970,7 @@ static int cc2520_hw_init(struct cc2520_private *priv)
 
                if (timeout-- <= 0) {
                        dev_err(&priv->spi->dev, "oscillator start failed!\n");
-                       return ret;
+                       return -ETIMEDOUT;
                }
                udelay(1);
        } while (!(status & CC2520_STATUS_XOSC32M_STABLE));
index f41f67b..2fbac51 100644 (file)
@@ -3698,6 +3698,7 @@ static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
        [IFLA_MACSEC_SCB] = { .type = NLA_U8 },
        [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
        [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
+       [IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
 };
 
 static void macsec_free_netdev(struct net_device *dev)
index eb344f6..b782c35 100644 (file)
@@ -98,6 +98,7 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
         */
        rc = phy_device_register(phy);
        if (rc) {
+               device_set_node(&phy->mdio.dev, NULL);
                fwnode_handle_put(child);
                return rc;
        }
@@ -153,7 +154,8 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
                /* All data is now stored in the phy struct, so register it */
                rc = phy_device_register(phy);
                if (rc) {
-                       fwnode_handle_put(phy->mdio.dev.fwnode);
+                       phy->mdio.dev.fwnode = NULL;
+                       fwnode_handle_put(child);
                        goto clean_phy;
                }
        } else if (is_of_node(child)) {
index 796e9c7..510822d 100644 (file)
@@ -68,8 +68,9 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
        /* All data is now stored in the mdiodev struct; register it. */
        rc = mdio_device_register(mdiodev);
        if (rc) {
+               device_set_node(&mdiodev->dev, NULL);
+               fwnode_handle_put(fwnode);
                mdio_device_free(mdiodev);
-               of_node_put(child);
                return rc;
        }
 
index 250742f..044828d 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/unistd.h>
+#include <linux/property.h>
 
 void mdio_device_free(struct mdio_device *mdiodev)
 {
@@ -30,6 +31,7 @@ EXPORT_SYMBOL(mdio_device_free);
 
 static void mdio_device_release(struct device *dev)
 {
+       fwnode_handle_put(dev->fwnode);
        kfree(to_mdio_device(dev));
 }
 
index 24bae27..cae2409 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/module.h>
 #include <linux/bitfield.h>
 #include <linux/hwmon.h>
+#include <linux/mutex.h>
 #include <linux/phy.h>
 #include <linux/polynomial.h>
 #include <linux/netdevice.h>
 #define VPSPEC1_TEMP_STA       0x0E
 #define VPSPEC1_TEMP_STA_DATA  GENMASK(9, 0)
 
+/* Mailbox */
+#define VSPEC1_MBOX_DATA       0x5
+#define VSPEC1_MBOX_ADDRLO     0x6
+#define VSPEC1_MBOX_CMD                0x7
+#define VSPEC1_MBOX_CMD_ADDRHI GENMASK(7, 0)
+#define VSPEC1_MBOX_CMD_RD     (0 << 8)
+#define VSPEC1_MBOX_CMD_READY  BIT(15)
+
 /* WoL */
 #define VPSPEC2_WOL_CTL                0x0E06
 #define VPSPEC2_WOL_AD01       0x0E08
 #define VPSPEC2_WOL_AD45       0x0E0A
 #define WOL_EN                 BIT(0)
 
+/* Internal registers, access via mbox */
+#define REG_GPIO0_OUT          0xd3ce00
+
 struct gpy_priv {
+       /* serialize mailbox acesses */
+       struct mutex mbox_lock;
+
        u8 fw_major;
        u8 fw_minor;
 };
@@ -187,6 +202,45 @@ static int gpy_hwmon_register(struct phy_device *phydev)
 }
 #endif
 
+static int gpy_mbox_read(struct phy_device *phydev, u32 addr)
+{
+       struct gpy_priv *priv = phydev->priv;
+       int val, ret;
+       u16 cmd;
+
+       mutex_lock(&priv->mbox_lock);
+
+       ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_MBOX_ADDRLO,
+                           addr);
+       if (ret)
+               goto out;
+
+       cmd = VSPEC1_MBOX_CMD_RD;
+       cmd |= FIELD_PREP(VSPEC1_MBOX_CMD_ADDRHI, addr >> 16);
+
+       ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_MBOX_CMD, cmd);
+       if (ret)
+               goto out;
+
+       /* The mbox read is used in the interrupt workaround. It was observed
+        * that a read might take up to 2.5ms. This is also the time for which
+        * the interrupt line is stuck low. To be on the safe side, poll the
+        * ready bit for 10ms.
+        */
+       ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+                                       VSPEC1_MBOX_CMD, val,
+                                       (val & VSPEC1_MBOX_CMD_READY),
+                                       500, 10000, false);
+       if (ret)
+               goto out;
+
+       ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_MBOX_DATA);
+
+out:
+       mutex_unlock(&priv->mbox_lock);
+       return ret;
+}
+
 static int gpy_config_init(struct phy_device *phydev)
 {
        int ret;
@@ -201,6 +255,13 @@ static int gpy_config_init(struct phy_device *phydev)
        return ret < 0 ? ret : 0;
 }
 
+static bool gpy_has_broken_mdint(struct phy_device *phydev)
+{
+       /* At least these PHYs are known to have broken interrupt handling */
+       return phydev->drv->phy_id == PHY_ID_GPY215B ||
+              phydev->drv->phy_id == PHY_ID_GPY215C;
+}
+
 static int gpy_probe(struct phy_device *phydev)
 {
        struct device *dev = &phydev->mdio.dev;
@@ -218,6 +279,7 @@ static int gpy_probe(struct phy_device *phydev)
        if (!priv)
                return -ENOMEM;
        phydev->priv = priv;
+       mutex_init(&priv->mbox_lock);
 
        fw_version = phy_read(phydev, PHY_FWV);
        if (fw_version < 0)
@@ -492,6 +554,29 @@ static irqreturn_t gpy_handle_interrupt(struct phy_device *phydev)
        if (!(reg & PHY_IMASK_MASK))
                return IRQ_NONE;
 
+       /* The PHY might leave the interrupt line asserted even after PHY_ISTAT
+        * is read. To avoid interrupt storms, delay the interrupt handling as
+        * long as the PHY drives the interrupt line. An internal bus read will
+        * stall as long as the interrupt line is asserted, thus just read a
+        * random register here.
+        * Because we cannot access the internal bus at all while the interrupt
+        * is driven by the PHY, there is no way to make the interrupt line
+        * unstuck (e.g. by changing the pinmux to GPIO input) during that time
+        * frame. Therefore, polling is the best we can do and won't do any more
+        * harm.
+        * It was observed that this bug happens on link state and link speed
+        * changes on a GPY215B and GYP215C independent of the firmware version
+        * (which doesn't mean that this list is exhaustive).
+        */
+       if (gpy_has_broken_mdint(phydev) &&
+           (reg & (PHY_IMASK_LSTC | PHY_IMASK_LSPC))) {
+               reg = gpy_mbox_read(phydev, REG_GPIO0_OUT);
+               if (reg < 0) {
+                       phy_error(phydev);
+                       return IRQ_NONE;
+               }
+       }
+
        phy_trigger_machine(phydev);
 
        return IRQ_HANDLED;
index c8791e9..40ce8ab 100644 (file)
@@ -450,12 +450,12 @@ plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
        }
        rcv->state = PLIP_PK_DONE;
        if (rcv->skb) {
-               kfree_skb(rcv->skb);
+               dev_kfree_skb_irq(rcv->skb);
                rcv->skb = NULL;
        }
        snd->state = PLIP_PK_DONE;
        if (snd->skb) {
-               dev_kfree_skb(snd->skb);
+               dev_consume_skb_irq(snd->skb);
                snd->skb = NULL;
        }
        spin_unlock_irq(&nl->lock);
index a52ee2b..6312f67 100644 (file)
@@ -914,6 +914,7 @@ static int tbnet_open(struct net_device *dev)
                                eof_mask, tbnet_start_poll, net);
        if (!ring) {
                netdev_err(dev, "failed to allocate Rx ring\n");
+               tb_xdomain_release_out_hopid(xd, hopid);
                tb_ring_free(net->tx_ring.ring);
                net->tx_ring.ring = NULL;
                return -ENOMEM;
index d3e7b27..6f1e560 100644 (file)
@@ -75,8 +75,14 @@ vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
 
        for (i = 0; i < adapter->intr.num_intrs; i++)
                vmxnet3_enable_intr(adapter, i);
-       adapter->shared->devRead.intrConf.intrCtrl &=
+       if (!VMXNET3_VERSION_GE_6(adapter) ||
+           !adapter->queuesExtEnabled) {
+               adapter->shared->devRead.intrConf.intrCtrl &=
+                                       cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
+       } else {
+               adapter->shared->devReadExt.intrConfExt.intrCtrl &=
                                        cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
+       }
 }
 
 
@@ -85,8 +91,14 @@ vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
 {
        int i;
 
-       adapter->shared->devRead.intrConf.intrCtrl |=
+       if (!VMXNET3_VERSION_GE_6(adapter) ||
+           !adapter->queuesExtEnabled) {
+               adapter->shared->devRead.intrConf.intrCtrl |=
+                                       cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
+       } else {
+               adapter->shared->devReadExt.intrConfExt.intrCtrl |=
                                        cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
+       }
        for (i = 0; i < adapter->intr.num_intrs; i++)
                vmxnet3_disable_intr(adapter, i);
 }
@@ -1396,6 +1408,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
        };
        u32 num_pkts = 0;
        bool skip_page_frags = false;
+       bool encap_lro = false;
        struct Vmxnet3_RxCompDesc *rcd;
        struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
        u16 segCnt = 0, mss = 0;
@@ -1556,13 +1569,18 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                        if (VMXNET3_VERSION_GE_2(adapter) &&
                            rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
                                struct Vmxnet3_RxCompDescExt *rcdlro;
+                               union Vmxnet3_GenericDesc *gdesc;
+
                                rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
+                               gdesc = (union Vmxnet3_GenericDesc *)rcd;
 
                                segCnt = rcdlro->segCnt;
                                WARN_ON_ONCE(segCnt == 0);
                                mss = rcdlro->mss;
                                if (unlikely(segCnt <= 1))
                                        segCnt = 0;
+                               encap_lro = (le32_to_cpu(gdesc->dword[0]) &
+                                       (1UL << VMXNET3_RCD_HDR_INNER_SHIFT));
                        } else {
                                segCnt = 0;
                        }
@@ -1630,7 +1648,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                        vmxnet3_rx_csum(adapter, skb,
                                        (union Vmxnet3_GenericDesc *)rcd);
                        skb->protocol = eth_type_trans(skb, adapter->netdev);
-                       if (!rcd->tcp ||
+                       if ((!rcd->tcp && !encap_lro) ||
                            !(adapter->netdev->features & NETIF_F_LRO))
                                goto not_lro;
 
@@ -1639,7 +1657,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                                        SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
                                skb_shinfo(skb)->gso_size = mss;
                                skb_shinfo(skb)->gso_segs = segCnt;
-                       } else if (segCnt != 0 || skb->len > mtu) {
+                       } else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
                                u32 hlen;
 
                                hlen = vmxnet3_get_hdr_len(adapter, skb,
@@ -1668,6 +1686,7 @@ not_lro:
                                napi_gro_receive(&rq->napi, skb);
 
                        ctx->skb = NULL;
+                       encap_lro = false;
                        num_pkts++;
                }
 
index 9c7a9a2..fc928b2 100644 (file)
@@ -332,6 +332,7 @@ struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
                        if (!ipc_mux->ul_adb.pp_qlt[i]) {
                                for (j = i - 1; j >= 0; j--)
                                        kfree(ipc_mux->ul_adb.pp_qlt[j]);
+                               kfree(ipc_mux);
                                return NULL;
                        }
                }
index 1545cbe..3dbfc8a 100644 (file)
@@ -386,7 +386,7 @@ int xenvif_dealloc_kthread(void *data);
 irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
 
 bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
-void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
+bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
 
 void xenvif_carrier_on(struct xenvif *vif);
 
index 650fa18..f3f2c07 100644 (file)
@@ -254,14 +254,16 @@ xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
                skb_clear_hash(skb);
 
-       xenvif_rx_queue_tail(queue, skb);
+       if (!xenvif_rx_queue_tail(queue, skb))
+               goto drop;
+
        xenvif_kick_thread(queue);
 
        return NETDEV_TX_OK;
 
  drop:
        vif->dev->stats.tx_dropped++;
-       dev_kfree_skb(skb);
+       dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
 }
 
index 3d2081b..bf627af 100644 (file)
@@ -332,10 +332,13 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 
 
 struct xenvif_tx_cb {
-       u16 pending_idx;
+       u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
+       u8 copy_count;
 };
 
 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
+#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
+#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
 
 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
                                           u16 pending_idx,
@@ -370,31 +373,93 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
        return skb;
 }
 
-static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
-                                                       struct sk_buff *skb,
-                                                       struct xen_netif_tx_request *txp,
-                                                       struct gnttab_map_grant_ref *gop,
-                                                       unsigned int frag_overflow,
-                                                       struct sk_buff *nskb)
+static void xenvif_get_requests(struct xenvif_queue *queue,
+                               struct sk_buff *skb,
+                               struct xen_netif_tx_request *first,
+                               struct xen_netif_tx_request *txfrags,
+                               unsigned *copy_ops,
+                               unsigned *map_ops,
+                               unsigned int frag_overflow,
+                               struct sk_buff *nskb,
+                               unsigned int extra_count,
+                               unsigned int data_len)
 {
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        skb_frag_t *frags = shinfo->frags;
-       u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
-       int start;
+       u16 pending_idx;
        pending_ring_idx_t index;
        unsigned int nr_slots;
+       struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
+       struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
+       struct xen_netif_tx_request *txp = first;
+
+       nr_slots = shinfo->nr_frags + 1;
+
+       copy_count(skb) = 0;
+
+       /* Create copy ops for exactly data_len bytes into the skb head. */
+       __skb_put(skb, data_len);
+       while (data_len > 0) {
+               int amount = data_len > txp->size ? txp->size : data_len;
+
+               cop->source.u.ref = txp->gref;
+               cop->source.domid = queue->vif->domid;
+               cop->source.offset = txp->offset;
+
+               cop->dest.domid = DOMID_SELF;
+               cop->dest.offset = (offset_in_page(skb->data +
+                                                  skb_headlen(skb) -
+                                                  data_len)) & ~XEN_PAGE_MASK;
+               cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
+                                              - data_len);
+
+               cop->len = amount;
+               cop->flags = GNTCOPY_source_gref;
 
-       nr_slots = shinfo->nr_frags;
+               index = pending_index(queue->pending_cons);
+               pending_idx = queue->pending_ring[index];
+               callback_param(queue, pending_idx).ctx = NULL;
+               copy_pending_idx(skb, copy_count(skb)) = pending_idx;
+               copy_count(skb)++;
+
+               cop++;
+               data_len -= amount;
 
-       /* Skip first skb fragment if it is on same page as header fragment. */
-       start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+               if (amount == txp->size) {
+                       /* The copy op covered the full tx_request */
+
+                       memcpy(&queue->pending_tx_info[pending_idx].req,
+                              txp, sizeof(*txp));
+                       queue->pending_tx_info[pending_idx].extra_count =
+                               (txp == first) ? extra_count : 0;
+
+                       if (txp == first)
+                               txp = txfrags;
+                       else
+                               txp++;
+                       queue->pending_cons++;
+                       nr_slots--;
+               } else {
+                       /* The copy op partially covered the tx_request.
+                        * The remainder will be mapped.
+                        */
+                       txp->offset += amount;
+                       txp->size -= amount;
+               }
+       }
 
-       for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
-            shinfo->nr_frags++, txp++, gop++) {
+       for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
+            shinfo->nr_frags++, gop++) {
                index = pending_index(queue->pending_cons++);
                pending_idx = queue->pending_ring[index];
-               xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
+               xenvif_tx_create_map_op(queue, pending_idx, txp,
+                                       txp == first ? extra_count : 0, gop);
                frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+
+               if (txp == first)
+                       txp = txfrags;
+               else
+                       txp++;
        }
 
        if (frag_overflow) {
@@ -415,7 +480,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
                skb_shinfo(skb)->frag_list = nskb;
        }
 
-       return gop;
+       (*copy_ops) = cop - queue->tx_copy_ops;
+       (*map_ops) = gop - queue->tx_map_ops;
 }
 
 static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
@@ -451,7 +517,7 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
                               struct gnttab_copy **gopp_copy)
 {
        struct gnttab_map_grant_ref *gop_map = *gopp_map;
-       u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+       u16 pending_idx;
        /* This always points to the shinfo of the skb being checked, which
         * could be either the first or the one on the frag_list
         */
@@ -462,24 +528,37 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
        struct skb_shared_info *first_shinfo = NULL;
        int nr_frags = shinfo->nr_frags;
        const bool sharedslot = nr_frags &&
-                               frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
-       int i, err;
+                               frag_get_pending_idx(&shinfo->frags[0]) ==
+                                   copy_pending_idx(skb, copy_count(skb) - 1);
+       int i, err = 0;
 
-       /* Check status of header. */
-       err = (*gopp_copy)->status;
-       if (unlikely(err)) {
-               if (net_ratelimit())
-                       netdev_dbg(queue->vif->dev,
-                                  "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
-                                  (*gopp_copy)->status,
-                                  pending_idx,
-                                  (*gopp_copy)->source.u.ref);
-               /* The first frag might still have this slot mapped */
-               if (!sharedslot)
-                       xenvif_idx_release(queue, pending_idx,
-                                          XEN_NETIF_RSP_ERROR);
+       for (i = 0; i < copy_count(skb); i++) {
+               int newerr;
+
+               /* Check status of header. */
+               pending_idx = copy_pending_idx(skb, i);
+
+               newerr = (*gopp_copy)->status;
+               if (likely(!newerr)) {
+                       /* The first frag might still have this slot mapped */
+                       if (i < copy_count(skb) - 1 || !sharedslot)
+                               xenvif_idx_release(queue, pending_idx,
+                                                  XEN_NETIF_RSP_OKAY);
+               } else {
+                       err = newerr;
+                       if (net_ratelimit())
+                               netdev_dbg(queue->vif->dev,
+                                          "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
+                                          (*gopp_copy)->status,
+                                          pending_idx,
+                                          (*gopp_copy)->source.u.ref);
+                       /* The first frag might still have this slot mapped */
+                       if (i < copy_count(skb) - 1 || !sharedslot)
+                               xenvif_idx_release(queue, pending_idx,
+                                                  XEN_NETIF_RSP_ERROR);
+               }
+               (*gopp_copy)++;
        }
-       (*gopp_copy)++;
 
 check_frags:
        for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -526,14 +605,6 @@ check_frags:
                if (err)
                        continue;
 
-               /* First error: if the header haven't shared a slot with the
-                * first frag, release it as well.
-                */
-               if (!sharedslot)
-                       xenvif_idx_release(queue,
-                                          XENVIF_TX_CB(skb)->pending_idx,
-                                          XEN_NETIF_RSP_OKAY);
-
                /* Invalidate preceding fragments of this skb. */
                for (j = 0; j < i; j++) {
                        pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
@@ -803,7 +874,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                                     unsigned *copy_ops,
                                     unsigned *map_ops)
 {
-       struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
        struct sk_buff *skb, *nskb;
        int ret;
        unsigned int frag_overflow;
@@ -885,8 +955,12 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                        continue;
                }
 
+               data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
+                       XEN_NETBACK_TX_COPY_LEN : txreq.size;
+
                ret = xenvif_count_requests(queue, &txreq, extra_count,
                                            txfrags, work_to_do);
+
                if (unlikely(ret < 0))
                        break;
 
@@ -912,9 +986,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                index = pending_index(queue->pending_cons);
                pending_idx = queue->pending_ring[index];
 
-               data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
-                           ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
-                       XEN_NETBACK_TX_COPY_LEN : txreq.size;
+               if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
+                       data_len = txreq.size;
 
                skb = xenvif_alloc_skb(data_len);
                if (unlikely(skb == NULL)) {
@@ -925,8 +998,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                }
 
                skb_shinfo(skb)->nr_frags = ret;
-               if (data_len < txreq.size)
-                       skb_shinfo(skb)->nr_frags++;
                /* At this point shinfo->nr_frags is in fact the number of
                 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
                 */
@@ -988,54 +1059,19 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                                             type);
                }
 
-               XENVIF_TX_CB(skb)->pending_idx = pending_idx;
-
-               __skb_put(skb, data_len);
-               queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
-               queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
-               queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
-
-               queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
-                       virt_to_gfn(skb->data);
-               queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
-               queue->tx_copy_ops[*copy_ops].dest.offset =
-                       offset_in_page(skb->data) & ~XEN_PAGE_MASK;
-
-               queue->tx_copy_ops[*copy_ops].len = data_len;
-               queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
-
-               (*copy_ops)++;
-
-               if (data_len < txreq.size) {
-                       frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
-                                            pending_idx);
-                       xenvif_tx_create_map_op(queue, pending_idx, &txreq,
-                                               extra_count, gop);
-                       gop++;
-               } else {
-                       frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
-                                            INVALID_PENDING_IDX);
-                       memcpy(&queue->pending_tx_info[pending_idx].req,
-                              &txreq, sizeof(txreq));
-                       queue->pending_tx_info[pending_idx].extra_count =
-                               extra_count;
-               }
-
-               queue->pending_cons++;
-
-               gop = xenvif_get_requests(queue, skb, txfrags, gop,
-                                         frag_overflow, nskb);
+               xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
+                                   map_ops, frag_overflow, nskb, extra_count,
+                                   data_len);
 
                __skb_queue_tail(&queue->tx_queue, skb);
 
                queue->tx.req_cons = idx;
 
-               if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
+               if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
                    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
                        break;
        }
 
-       (*map_ops) = gop - queue->tx_map_ops;
        return;
 }
 
@@ -1114,9 +1150,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
        while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
                struct xen_netif_tx_request *txp;
                u16 pending_idx;
-               unsigned data_len;
 
-               pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+               pending_idx = copy_pending_idx(skb, 0);
                txp = &queue->pending_tx_info[pending_idx].req;
 
                /* Check the remap error code. */
@@ -1135,18 +1170,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
                        continue;
                }
 
-               data_len = skb->len;
-               callback_param(queue, pending_idx).ctx = NULL;
-               if (data_len < txp->size) {
-                       /* Append the packet payload as a fragment. */
-                       txp->offset += data_len;
-                       txp->size -= data_len;
-               } else {
-                       /* Schedule a response immediately. */
-                       xenvif_idx_release(queue, pending_idx,
-                                          XEN_NETIF_RSP_OKAY);
-               }
-
                if (txp->flags & XEN_NETTXF_csum_blank)
                        skb->ip_summed = CHECKSUM_PARTIAL;
                else if (txp->flags & XEN_NETTXF_data_validated)
@@ -1332,7 +1355,7 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
 /* Called after netfront has transmitted */
 int xenvif_tx_action(struct xenvif_queue *queue, int budget)
 {
-       unsigned nr_mops, nr_cops = 0;
+       unsigned nr_mops = 0, nr_cops = 0;
        int work_done, ret;
 
        if (unlikely(!tx_work_todo(queue)))
index 9327621..0ba754e 100644 (file)
@@ -82,9 +82,10 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
        return false;
 }
 
-void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
+bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
 {
        unsigned long flags;
+       bool ret = true;
 
        spin_lock_irqsave(&queue->rx_queue.lock, flags);
 
@@ -92,8 +93,7 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
                struct net_device *dev = queue->vif->dev;
 
                netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
-               kfree_skb(skb);
-               queue->vif->dev->stats.rx_dropped++;
+               ret = false;
        } else {
                if (skb_queue_empty(&queue->rx_queue))
                        xenvif_update_needed_slots(queue, skb);
@@ -104,6 +104,8 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
        }
 
        spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+
+       return ret;
 }
 
 static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
index 9af2b02..dc404e0 100644 (file)
@@ -1862,6 +1862,12 @@ static int netfront_resume(struct xenbus_device *dev)
        netif_tx_unlock_bh(info->netdev);
 
        xennet_disconnect_backend(info);
+
+       rtnl_lock();
+       if (info->queues)
+               xennet_destroy_queues(info);
+       rtnl_unlock();
+
        return 0;
 }
 
index 69e3339..7e3893d 100644 (file)
@@ -3095,10 +3095,6 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
        if (!ctrl->identified) {
                unsigned int i;
 
-               ret = nvme_init_subsystem(ctrl, id);
-               if (ret)
-                       goto out_free;
-
                /*
                 * Check for quirks.  Quirk can depend on firmware version,
                 * so, in principle, the set of quirks present can change
@@ -3111,6 +3107,10 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
                        if (quirk_matches(id, &core_quirks[i]))
                                ctrl->quirks |= core_quirks[i].quirks;
                }
+
+               ret = nvme_init_subsystem(ctrl, id);
+               if (ret)
+                       goto out_free;
        }
        memcpy(ctrl->subsys->firmware_rev, id->fr,
               sizeof(ctrl->subsys->firmware_rev));
index ef4ae97..439d282 100644 (file)
@@ -739,8 +739,14 @@ static void amd_pmc_s2idle_prepare(void)
 static void amd_pmc_s2idle_check(void)
 {
        struct amd_pmc_dev *pdev = &pmc;
+       struct smu_metrics table;
        int rc;
 
+       /* CZN: Ensure that future s0i3 entry attempts at least 10ms passed */
+       if (pdev->cpu_id == AMD_CPU_ID_CZN && !get_metrics_table(pdev, &table) &&
+           table.s0i3_last_entry_status)
+               usleep_range(10000, 20000);
+
        /* Dump the IdleMask before we add to the STB */
        amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
 
index 9dc9358..c6ded3f 100644 (file)
@@ -758,7 +758,6 @@ static void qeth_l2_br2dev_worker(struct work_struct *work)
        struct list_head *iter;
        int err = 0;
 
-       kfree(br2dev_event_work);
        QETH_CARD_TEXT_(card, 4, "b2dw%04lx", event);
        QETH_CARD_TEXT_(card, 4, "ma%012llx", ether_addr_to_u64(addr));
 
@@ -815,6 +814,7 @@ unlock:
        dev_put(brdev);
        dev_put(lsyncdev);
        dev_put(dstdev);
+       kfree(br2dev_event_work);
 }
 
 static int qeth_l2_br2dev_queue_work(struct net_device *brdev,
index 451d8a0..bce2492 100644 (file)
@@ -605,6 +605,14 @@ again:
                        set_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
                        queue = true;
                }
+               /*
+                * We could race with cookie_lru which may set LRU_DISCARD bit
+                * but has yet to run the cookie state machine.  If this happens
+                * and another thread tries to use the cookie, clear LRU_DISCARD
+                * so we don't end up withdrawing the cookie while in use.
+                */
+               if (test_and_clear_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags))
+                       fscache_see_cookie(cookie, fscache_cookie_see_lru_discard_clear);
                break;
 
        case FSCACHE_COOKIE_STATE_FAILED:
index 528bd44..2b7d077 100644 (file)
@@ -68,6 +68,7 @@ struct css_task_iter {
        struct list_head                iters_node;     /* css_set->task_iters */
 };
 
+extern struct file_system_type cgroup_fs_type;
 extern struct cgroup_root cgrp_dfl_root;
 extern struct css_set init_css_set;
 
index 86b95cc..b07b277 100644 (file)
  * can use the extra bits to store other information besides PFN.
  */
 #ifdef MAX_PHYSMEM_BITS
-#define SWP_PFN_BITS                   (MAX_PHYSMEM_BITS - PAGE_SHIFT)
+#define SWP_PFN_BITS           (MAX_PHYSMEM_BITS - PAGE_SHIFT)
 #else  /* MAX_PHYSMEM_BITS */
-#define SWP_PFN_BITS                   (BITS_PER_LONG - PAGE_SHIFT)
+#define SWP_PFN_BITS           min_t(int, \
+                                     sizeof(phys_addr_t) * 8 - PAGE_SHIFT, \
+                                     SWP_TYPE_SHIFT)
 #endif /* MAX_PHYSMEM_BITS */
-#define SWP_PFN_MASK                   (BIT(SWP_PFN_BITS) - 1)
+#define SWP_PFN_MASK           (BIT(SWP_PFN_BITS) - 1)
 
 /**
  * Migration swap entry specific bitfield definitions.  Layout:
index e004ba0..684f1cd 100644 (file)
@@ -228,6 +228,17 @@ enum {
         */
        HCI_QUIRK_VALID_LE_STATES,
 
+       /* When this quirk is set, then erroneous data reporting
+        * is ignored. This is mainly due to the fact that the HCI
+        * Read Default Erroneous Data Reporting command is advertised,
+        * but not supported; these controllers often reply with unknown
+        * command and tend to lock up randomly. Needing a hard reset.
+        *
+        * This quirk can be set before hci_register_dev is called or
+        * during the hdev->setup vendor callback.
+        */
+       HCI_QUIRK_BROKEN_ERR_DATA_REPORTING,
+
        /*
         * When this quirk is set, then the hci_suspend_notifier is not
         * registered. This is intended for devices which drop completely
@@ -1424,7 +1435,6 @@ struct hci_std_codecs_v2 {
 } __packed;
 
 struct hci_vnd_codec_v2 {
-       __u8    id;
        __le16  cid;
        __le16  vid;
        __u8    transport;
index e4ff391..9233ad3 100644 (file)
@@ -16,9 +16,6 @@
 #define PING_HTABLE_SIZE       64
 #define PING_HTABLE_MASK       (PING_HTABLE_SIZE-1)
 
-#define ping_portaddr_for_each_entry(__sk, node, list) \
-       hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
-
 /*
  * gid_t is either uint or ushort.  We want to pass it to
  * proc_dointvec_minmax(), so it must not be larger than MAX_INT
index c078c48..a6190aa 100644 (file)
@@ -66,6 +66,7 @@ enum fscache_cookie_trace {
        fscache_cookie_put_work,
        fscache_cookie_see_active,
        fscache_cookie_see_lru_discard,
+       fscache_cookie_see_lru_discard_clear,
        fscache_cookie_see_lru_do_one,
        fscache_cookie_see_relinquish,
        fscache_cookie_see_withdraw,
@@ -149,6 +150,7 @@ enum fscache_access_trace {
        EM(fscache_cookie_put_work,             "PQ  work ")            \
        EM(fscache_cookie_see_active,           "-   activ")            \
        EM(fscache_cookie_see_lru_discard,      "-   x-lru")            \
+       EM(fscache_cookie_see_lru_discard_clear,"-   lrudc")            \
        EM(fscache_cookie_see_lru_do_one,       "-   lrudo")            \
        EM(fscache_cookie_see_relinquish,       "-   x-rlq")            \
        EM(fscache_cookie_see_withdraw,         "-   x-wth")            \
index 8840cf3..61cd7ff 100644 (file)
@@ -2707,8 +2707,10 @@ static __cold void io_tctx_exit_cb(struct callback_head *cb)
        /*
         * When @in_idle, we're in cancellation and it's racy to remove the
         * node. It'll be removed by the end of cancellation, just ignore it.
+        * tctx can be NULL if the queueing of this task_work raced with
+        * work cancelation off the exec path.
         */
-       if (!atomic_read(&tctx->in_idle))
+       if (tctx && !atomic_read(&tctx->in_idle))
                io_uring_del_tctx_node((unsigned long)work->ctx);
        complete(&work->completion);
 }
index c8496f9..00f88aa 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -2179,14 +2179,15 @@ long __do_semtimedop(int semid, struct sembuf *sops,
                 * scenarios where we were awakened externally, during the
                 * window between wake_q_add() and wake_up_q().
                 */
+               rcu_read_lock();
                error = READ_ONCE(queue.status);
                if (error != -EINTR) {
                        /* see SEM_BARRIER_2 for purpose/pairing */
                        smp_acquire__after_ctrl_dep();
+                       rcu_read_unlock();
                        goto out;
                }
 
-               rcu_read_lock();
                locknum = sem_lock(sma, sops, nsops);
 
                if (!ipc_valid_object(&sma->sem_perm))
index fd40208..367b0a4 100644 (file)
@@ -167,7 +167,6 @@ struct cgroup_mgctx {
 extern spinlock_t css_set_lock;
 extern struct cgroup_subsys *cgroup_subsys[];
 extern struct list_head cgroup_roots;
-extern struct file_system_type cgroup_fs_type;
 
 /* iterate across the hierarchies */
 #define for_each_root(root)                                            \
index 188c305..c6d9dec 100644 (file)
@@ -267,13 +267,14 @@ int proc_dostring(struct ctl_table *table, int write,
                        ppos);
 }
 
-static size_t proc_skip_spaces(char **buf)
+static void proc_skip_spaces(char **buf, size_t *size)
 {
-       size_t ret;
-       char *tmp = skip_spaces(*buf);
-       ret = tmp - *buf;
-       *buf = tmp;
-       return ret;
+       while (*size) {
+               if (!isspace(**buf))
+                       break;
+               (*size)--;
+               (*buf)++;
+       }
 }
 
 static void proc_skip_char(char **buf, size_t *size, const char v)
@@ -342,13 +343,12 @@ static int proc_get_long(char **buf, size_t *size,
                          unsigned long *val, bool *neg,
                          const char *perm_tr, unsigned perm_tr_len, char *tr)
 {
-       int len;
        char *p, tmp[TMPBUFLEN];
+       ssize_t len = *size;
 
-       if (!*size)
+       if (len <= 0)
                return -EINVAL;
 
-       len = *size;
        if (len > TMPBUFLEN - 1)
                len = TMPBUFLEN - 1;
 
@@ -521,7 +521,7 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
                bool neg;
 
                if (write) {
-                       left -= proc_skip_spaces(&p);
+                       proc_skip_spaces(&p, &left);
 
                        if (!left)
                                break;
@@ -548,7 +548,7 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
        if (!write && !first && left && !err)
                proc_put_char(&buffer, &left, '\n');
        if (write && !err && left)
-               left -= proc_skip_spaces(&p);
+               proc_skip_spaces(&p, &left);
        if (write && first)
                return err ? : -EINVAL;
        *lenp -= left;
@@ -590,7 +590,7 @@ static int do_proc_douintvec_w(unsigned int *tbl_data,
        if (left > PAGE_SIZE - 1)
                left = PAGE_SIZE - 1;
 
-       left -= proc_skip_spaces(&p);
+       proc_skip_spaces(&p, &left);
        if (!left) {
                err = -EINVAL;
                goto out_free;
@@ -610,7 +610,7 @@ static int do_proc_douintvec_w(unsigned int *tbl_data,
        }
 
        if (!err && left)
-               left -= proc_skip_spaces(&p);
+               proc_skip_spaces(&p, &left);
 
 out_free:
        if (err)
@@ -1075,7 +1075,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table,
                if (write) {
                        bool neg;
 
-                       left -= proc_skip_spaces(&p);
+                       proc_skip_spaces(&p, &left);
                        if (!left)
                                break;
 
@@ -1104,7 +1104,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table,
        if (!write && !first && left && !err)
                proc_put_char(&buffer, &left, '\n');
        if (write && !err)
-               left -= proc_skip_spaces(&p);
+               proc_skip_spaces(&p, &left);
        if (write && first)
                return err ? : -EINVAL;
        *lenp -= left;
index fe195d4..3b7bc2c 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2852,7 +2852,7 @@ static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned lo
                next = pud_addr_end(addr, end);
                if (unlikely(!pud_present(pud)))
                        return 0;
-               if (unlikely(pud_huge(pud))) {
+               if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
                        if (!gup_huge_pud(pud, pudp, addr, next, flags,
                                          pages, nr))
                                return 0;
index a1a35c1..266a1ab 100644 (file)
@@ -4832,6 +4832,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
        unsigned int efd, cfd;
        struct fd efile;
        struct fd cfile;
+       struct dentry *cdentry;
        const char *name;
        char *endp;
        int ret;
@@ -4886,6 +4887,16 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
                goto out_put_cfile;
 
        /*
+        * The control file must be a regular cgroup1 file. As a regular cgroup
+        * file can't be renamed, it's safe to access its name afterwards.
+        */
+       cdentry = cfile.file->f_path.dentry;
+       if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
+               ret = -EINVAL;
+               goto out_put_cfile;
+       }
+
+       /*
         * Determine the event callbacks and set them in @event.  This used
         * to be done via struct cftype but cgroup core no longer knows
         * about these events.  The following is crude but the whole thing
@@ -4893,7 +4904,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
         *
         * DO NOT ADD NEW FILES.
         */
-       name = cfile.file->f_path.dentry->d_name.name;
+       name = cdentry->d_name.name;
 
        if (!strcmp(name, "memory.usage_in_bytes")) {
                event->register_event = mem_cgroup_usage_register_event;
@@ -4917,7 +4928,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
         * automatically removed on cgroup destruction but the removal is
         * asynchronous, so take an extra ref on @css.
         */
-       cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
+       cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
                                               &memory_cgrp_subsys);
        ret = -EINVAL;
        if (IS_ERR(cfile_css))
index a5eb2f1..54abd46 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -226,8 +226,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
                /* Search one past newbrk */
                mas_set(&mas, newbrk);
                brkvma = mas_find(&mas, oldbrk);
-               BUG_ON(brkvma == NULL);
-               if (brkvma->vm_start >= oldbrk)
+               if (!brkvma || brkvma->vm_start >= oldbrk)
                        goto out; /* mapping intersects with an existing non-brk vma. */
                /*
                 * mm->brk must be protected by write mmap_lock.
@@ -2946,9 +2945,9 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
         * Expand the existing vma if possible; Note that singular lists do not
         * occur after forking, so the expand will only happen on new VMAs.
         */
-       if (vma &&
-           (!vma->anon_vma || list_is_singular(&vma->anon_vma_chain)) &&
-           ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) {
+       if (vma && vma->vm_end == addr && !vma_policy(vma) &&
+           can_vma_merge_after(vma, flags, NULL, NULL,
+                               addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) {
                mas_set_range(mas, vma->vm_start, addr + len - 1);
                if (mas_preallocate(mas, vma, GFP_KERNEL))
                        return -ENOMEM;
@@ -3035,11 +3034,6 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
                goto munmap_failed;
 
        vma = mas_prev(&mas, 0);
-       if (!vma || vma->vm_end != addr || vma_policy(vma) ||
-           !can_vma_merge_after(vma, flags, NULL, NULL,
-                                addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL))
-               vma = NULL;
-
        ret = do_brk_flags(&mas, vma, addr, len, flags);
        populate = ((mm->def_flags & VM_LOCKED) != 0);
        mmap_write_unlock(mm);
index c1d8b8a..82911fe 100644 (file)
@@ -948,6 +948,15 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                index++;
        }
 
+       /*
+        * When undoing a failed fallocate, we want none of the partial folio
+        * zeroing and splitting below, but shall want to truncate the whole
+        * folio when !uptodate indicates that it was added by this fallocate,
+        * even when [lstart, lend] covers only a part of the folio.
+        */
+       if (unfalloc)
+               goto whole_folios;
+
        same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
        folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
        if (folio) {
@@ -973,6 +982,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                folio_put(folio);
        }
 
+whole_folios:
+
        index = start;
        while (index < end) {
                cond_resched();
index 215af9b..c57d643 100644 (file)
@@ -972,6 +972,7 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
        hci_dev_lock(hdev);
        hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
        hci_dev_unlock(hdev);
+       hci_dev_put(hdev);
 
        if (!hcon)
                return -ENOENT;
index dc65974..1c3c7ff 100644 (file)
@@ -737,7 +737,7 @@ static int __init bt_init(void)
 
        err = bt_sysfs_init();
        if (err < 0)
-               return err;
+               goto cleanup_led;
 
        err = sock_register(&bt_sock_family_ops);
        if (err)
@@ -773,6 +773,8 @@ unregister_socket:
        sock_unregister(PF_BLUETOOTH);
 cleanup_sysfs:
        bt_sysfs_cleanup();
+cleanup_led:
+       bt_leds_cleanup();
        return err;
 }
 
index 3820153..3cc135b 100644 (file)
@@ -72,9 +72,8 @@ static void hci_read_codec_capabilities(struct hci_dev *hdev, __u8 transport,
                                continue;
                        }
 
-                       skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODEC_CAPS,
-                                            sizeof(*cmd), cmd,
-                                            HCI_CMD_TIMEOUT);
+                       skb = __hci_cmd_sync_sk(hdev, HCI_OP_READ_LOCAL_CODEC_CAPS,
+                                               sizeof(*cmd), cmd, 0, HCI_CMD_TIMEOUT, NULL);
                        if (IS_ERR(skb)) {
                                bt_dev_err(hdev, "Failed to read codec capabilities (%ld)",
                                           PTR_ERR(skb));
@@ -127,8 +126,8 @@ void hci_read_supported_codecs(struct hci_dev *hdev)
        struct hci_op_read_local_codec_caps caps;
        __u8 i;
 
-       skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODECS, 0, NULL,
-                            HCI_CMD_TIMEOUT);
+       skb = __hci_cmd_sync_sk(hdev, HCI_OP_READ_LOCAL_CODECS, 0, NULL,
+                               0, HCI_CMD_TIMEOUT, NULL);
 
        if (IS_ERR(skb)) {
                bt_dev_err(hdev, "Failed to read local supported codecs (%ld)",
@@ -158,7 +157,8 @@ void hci_read_supported_codecs(struct hci_dev *hdev)
        for (i = 0; i < std_codecs->num; i++) {
                caps.id = std_codecs->codec[i];
                caps.direction = 0x00;
-               hci_read_codec_capabilities(hdev, LOCAL_CODEC_ACL_MASK, &caps);
+               hci_read_codec_capabilities(hdev,
+                                           LOCAL_CODEC_ACL_MASK | LOCAL_CODEC_SCO_MASK, &caps);
        }
 
        skb_pull(skb, flex_array_size(std_codecs, codec, std_codecs->num)
@@ -178,7 +178,8 @@ void hci_read_supported_codecs(struct hci_dev *hdev)
                caps.cid = vnd_codecs->codec[i].cid;
                caps.vid = vnd_codecs->codec[i].vid;
                caps.direction = 0x00;
-               hci_read_codec_capabilities(hdev, LOCAL_CODEC_ACL_MASK, &caps);
+               hci_read_codec_capabilities(hdev,
+                                           LOCAL_CODEC_ACL_MASK | LOCAL_CODEC_SCO_MASK, &caps);
        }
 
 error:
@@ -194,8 +195,8 @@ void hci_read_supported_codecs_v2(struct hci_dev *hdev)
        struct hci_op_read_local_codec_caps caps;
        __u8 i;
 
-       skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODECS_V2, 0, NULL,
-                            HCI_CMD_TIMEOUT);
+       skb = __hci_cmd_sync_sk(hdev, HCI_OP_READ_LOCAL_CODECS_V2, 0, NULL,
+                               0, HCI_CMD_TIMEOUT, NULL);
 
        if (IS_ERR(skb)) {
                bt_dev_err(hdev, "Failed to read local supported codecs (%ld)",
index 0540555..d97fac4 100644 (file)
@@ -2764,7 +2764,8 @@ int hci_register_suspend_notifier(struct hci_dev *hdev)
 {
        int ret = 0;
 
-       if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
+       if (!hdev->suspend_notifier.notifier_call &&
+           !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
                hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
                ret = register_pm_notifier(&hdev->suspend_notifier);
        }
@@ -2776,8 +2777,11 @@ int hci_unregister_suspend_notifier(struct hci_dev *hdev)
 {
        int ret = 0;
 
-       if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks))
+       if (hdev->suspend_notifier.notifier_call) {
                ret = unregister_pm_notifier(&hdev->suspend_notifier);
+               if (!ret)
+                       hdev->suspend_notifier.notifier_call = NULL;
+       }
 
        return ret;
 }
index 5a0296a..f7e006a 100644 (file)
@@ -269,7 +269,7 @@ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
 {
-       bt_dev_err(req->hdev, "HCI_REQ-0x%4.4x", opcode);
+       bt_dev_dbg(req->hdev, "HCI_REQ-0x%4.4x", opcode);
        hci_req_add_ev(req, opcode, plen, param, 0);
 }
 
index 76c3107..1fc6931 100644 (file)
@@ -12,6 +12,7 @@
 #include <net/bluetooth/mgmt.h>
 
 #include "hci_request.h"
+#include "hci_codec.h"
 #include "hci_debugfs.h"
 #include "smp.h"
 #include "eir.h"
@@ -3780,7 +3781,8 @@ static int hci_read_page_scan_activity_sync(struct hci_dev *hdev)
 static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev)
 {
        if (!(hdev->commands[18] & 0x04) ||
-           !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING))
+           !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
+           test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
                return 0;
 
        return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING,
@@ -4238,11 +4240,12 @@ static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev)
 /* Read local codec list if the HCI command is supported */
 static int hci_read_local_codecs_sync(struct hci_dev *hdev)
 {
-       if (!(hdev->commands[29] & 0x20))
-               return 0;
+       if (hdev->commands[45] & 0x04)
+               hci_read_supported_codecs_v2(hdev);
+       else if (hdev->commands[29] & 0x20)
+               hci_read_supported_codecs(hdev);
 
-       return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_CODECS, 0, NULL,
-                                    HCI_CMD_TIMEOUT);
+       return 0;
 }
 
 /* Read local pairing options if the HCI command is supported */
@@ -4298,7 +4301,8 @@ static int hci_set_err_data_report_sync(struct hci_dev *hdev)
        bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);
 
        if (!(hdev->commands[18] & 0x08) ||
-           !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING))
+           !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
+           test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
                return 0;
 
        if (enabled == hdev->err_data_reporting)
@@ -4457,6 +4461,9 @@ static const struct {
        HCI_QUIRK_BROKEN(STORED_LINK_KEY,
                         "HCI Delete Stored Link Key command is advertised, "
                         "but not supported."),
+       HCI_QUIRK_BROKEN(ERR_DATA_REPORTING,
+                        "HCI Read Default Erroneous Data Reporting command is "
+                        "advertised, but not supported."),
        HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER,
                         "HCI Read Transmit Power Level command is advertised, "
                         "but not supported."),
index f825857..26db929 100644 (file)
@@ -879,6 +879,7 @@ static int iso_listen_bis(struct sock *sk)
                                 iso_pi(sk)->bc_sid);
 
        hci_dev_unlock(hdev);
+       hci_dev_put(hdev);
 
        return err;
 }
index 9c24947..9fdede5 100644 (file)
@@ -4453,7 +4453,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
 
        chan->ident = cmd->ident;
        l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
-       chan->num_conf_rsp++;
+       if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
+               chan->num_conf_rsp++;
 
        /* Reset config buffer. */
        chan->conf_len = 0;
index 27dcdcc..c69168f 100644 (file)
@@ -677,7 +677,7 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
 static int can_rcv(struct sk_buff *skb, struct net_device *dev,
                   struct packet_type *pt, struct net_device *orig_dev)
 {
-       if (unlikely(dev->type != ARPHRD_CAN || (!can_is_can_skb(skb)))) {
+       if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || !can_is_can_skb(skb))) {
                pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n",
                             dev->type, skb->len);
 
@@ -692,7 +692,7 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
 static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
                     struct packet_type *pt, struct net_device *orig_dev)
 {
-       if (unlikely(dev->type != ARPHRD_CAN || (!can_is_canfd_skb(skb)))) {
+       if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || !can_is_canfd_skb(skb))) {
                pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n",
                             dev->type, skb->len);
 
@@ -707,7 +707,7 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
 static int canxl_rcv(struct sk_buff *skb, struct net_device *dev,
                     struct packet_type *pt, struct net_device *orig_dev)
 {
-       if (unlikely(dev->type != ARPHRD_CAN || (!can_is_canxl_skb(skb)))) {
+       if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || !can_is_canxl_skb(skb))) {
                pr_warn_once("PF_CAN: dropped non conform CAN XL skbuff: dev type %d, len %d\n",
                             dev->type, skb->len);
 
index 846588c..53a206d 100644 (file)
@@ -49,7 +49,8 @@ static struct sk_buff *hellcreek_rcv(struct sk_buff *skb,
                return NULL;
        }
 
-       pskb_trim_rcsum(skb, skb->len - HELLCREEK_TAG_LEN);
+       if (pskb_trim_rcsum(skb, skb->len - HELLCREEK_TAG_LEN))
+               return NULL;
 
        dsa_default_offload_fwd_mark(skb);
 
index 38fa19c..4292502 100644 (file)
@@ -21,7 +21,8 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
        if (!skb->dev)
                return NULL;
 
-       pskb_trim_rcsum(skb, skb->len - len);
+       if (pskb_trim_rcsum(skb, skb->len - len))
+               return NULL;
 
        dsa_default_offload_fwd_mark(skb);
 
index 83e4136..1a85125 100644 (file)
@@ -665,7 +665,8 @@ static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
                 * padding and trailer we need to account for the fact that
                 * skb->data points to skb_mac_header(skb) + ETH_HLEN.
                 */
-               pskb_trim_rcsum(skb, start_of_padding - ETH_HLEN);
+               if (pskb_trim_rcsum(skb, start_of_padding - ETH_HLEN))
+                       return NULL;
        /* Trap-to-host frame, no timestamp trailer */
        } else {
                *source_port = SJA1110_RX_HEADER_SRC_PORT(rx_header);
index f361d3d..b5736ef 100644 (file)
@@ -841,6 +841,9 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
                return -EINVAL;
        }
 
+       if (!cfg->fc_table)
+               cfg->fc_table = RT_TABLE_MAIN;
+
        return 0;
 errout:
        return err;
index 19a6620..ce9ff3c 100644 (file)
@@ -423,6 +423,7 @@ static struct fib_info *fib_find_info(struct fib_info *nfi)
                    nfi->fib_prefsrc == fi->fib_prefsrc &&
                    nfi->fib_priority == fi->fib_priority &&
                    nfi->fib_type == fi->fib_type &&
+                   nfi->fib_tb_id == fi->fib_tb_id &&
                    memcmp(nfi->fib_metrics, fi->fib_metrics,
                           sizeof(u32) * RTAX_MAX) == 0 &&
                    !((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) &&
index f866d62..cae9f1a 100644 (file)
@@ -1492,24 +1492,6 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
        struct ip_tunnel_parm *p = &t->parms;
        __be16 o_flags = p->o_flags;
 
-       if (t->erspan_ver <= 2) {
-               if (t->erspan_ver != 0 && !t->collect_md)
-                       o_flags |= TUNNEL_KEY;
-
-               if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
-                       goto nla_put_failure;
-
-               if (t->erspan_ver == 1) {
-                       if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
-                               goto nla_put_failure;
-               } else if (t->erspan_ver == 2) {
-                       if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
-                               goto nla_put_failure;
-                       if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
-                               goto nla_put_failure;
-               }
-       }
-
        if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
            nla_put_be16(skb, IFLA_GRE_IFLAGS,
                         gre_tnl_flags_to_gre_flags(p->i_flags)) ||
@@ -1550,6 +1532,34 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
+static int erspan_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+       struct ip_tunnel *t = netdev_priv(dev);
+
+       if (t->erspan_ver <= 2) {
+               if (t->erspan_ver != 0 && !t->collect_md)
+                       t->parms.o_flags |= TUNNEL_KEY;
+
+               if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
+                       goto nla_put_failure;
+
+               if (t->erspan_ver == 1) {
+                       if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
+                               goto nla_put_failure;
+               } else if (t->erspan_ver == 2) {
+                       if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
+                               goto nla_put_failure;
+                       if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
+                               goto nla_put_failure;
+               }
+       }
+
+       return ipgre_fill_info(skb, dev);
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
 static void erspan_setup(struct net_device *dev)
 {
        struct ip_tunnel *t = netdev_priv(dev);
@@ -1628,7 +1638,7 @@ static struct rtnl_link_ops erspan_link_ops __read_mostly = {
        .changelink     = erspan_changelink,
        .dellink        = ip_tunnel_dellink,
        .get_size       = ipgre_get_size,
-       .fill_info      = ipgre_fill_info,
+       .fill_info      = erspan_fill_info,
        .get_link_net   = ip_tunnel_get_link_net,
 };
 
index bde333b..04b4ec0 100644 (file)
 #include <net/transp_v6.h>
 #endif
 
+#define ping_portaddr_for_each_entry(__sk, node, list) \
+       hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
+#define ping_portaddr_for_each_entry_rcu(__sk, node, list) \
+       hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
+
 struct ping_table {
        struct hlist_nulls_head hash[PING_HTABLE_SIZE];
        spinlock_t              lock;
@@ -192,7 +197,7 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
                return NULL;
        }
 
-       ping_portaddr_for_each_entry(sk, hnode, hslot) {
+       ping_portaddr_for_each_entry_rcu(sk, hnode, hslot) {
                isk = inet_sk(sk);
 
                pr_debug("iterate\n");
index e195076..60fd91b 100644 (file)
@@ -920,6 +920,9 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                if (err < 0)
                        goto fail;
 
+               /* We prevent @rt from being freed. */
+               rcu_read_lock();
+
                for (;;) {
                        /* Prepare header of the next frame,
                         * before previous one went down. */
@@ -943,6 +946,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                if (err == 0) {
                        IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
                                      IPSTATS_MIB_FRAGOKS);
+                       rcu_read_unlock();
                        return 0;
                }
 
@@ -950,6 +954,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 
                IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
                              IPSTATS_MIB_FRAGFAILS);
+               rcu_read_unlock();
                return err;
 
 slow_path_clean:
index 500ed1b..7e2065e 100644 (file)
@@ -662,6 +662,7 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name,
        sdata->dev = ndev;
        sdata->wpan_dev.wpan_phy = local->hw.phy;
        sdata->local = local;
+       INIT_LIST_HEAD(&sdata->wpan_dev.list);
 
        /* setup type-dependent data */
        ret = ieee802154_setup_sdata(sdata, type);
index 2692139..23b3fed 100644 (file)
@@ -891,7 +891,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
        zone = nf_ct_zone(ct);
 
        if (!nf_ct_ext_valid_pre(ct->ext)) {
-               NF_CT_STAT_INC(net, insert_failed);
+               NF_CT_STAT_INC_ATOMIC(net, insert_failed);
                return -ETIMEDOUT;
        }
 
@@ -938,7 +938,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
 
        if (!nf_ct_ext_valid_post(ct->ext)) {
                nf_ct_kill(ct);
-               NF_CT_STAT_INC(net, drop);
+               NF_CT_STAT_INC_ATOMIC(net, drop);
                return -ETIMEDOUT;
        }
 
@@ -1275,7 +1275,7 @@ chaintoolong:
         */
        if (!nf_ct_ext_valid_post(ct->ext)) {
                nf_ct_kill(ct);
-               NF_CT_STAT_INC(net, drop);
+               NF_CT_STAT_INC_ATOMIC(net, drop);
                return NF_DROP;
        }
 
index d71150a..1286ae7 100644 (file)
@@ -328,8 +328,13 @@ nla_put_failure:
 }
 
 #ifdef CONFIG_NF_CONNTRACK_MARK
-static int ctnetlink_dump_mark(struct sk_buff *skb, u32 mark)
+static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
 {
+       u32 mark = READ_ONCE(ct->mark);
+
+       if (!mark)
+               return 0;
+
        if (nla_put_be32(skb, CTA_MARK, htonl(mark)))
                goto nla_put_failure;
        return 0;
@@ -543,7 +548,7 @@ static int ctnetlink_dump_extinfo(struct sk_buff *skb,
 static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
 {
        if (ctnetlink_dump_status(skb, ct) < 0 ||
-           ctnetlink_dump_mark(skb, READ_ONCE(ct->mark)) < 0 ||
+           ctnetlink_dump_mark(skb, ct) < 0 ||
            ctnetlink_dump_secctx(skb, ct) < 0 ||
            ctnetlink_dump_id(skb, ct) < 0 ||
            ctnetlink_dump_use(skb, ct) < 0 ||
@@ -722,7 +727,6 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
        struct sk_buff *skb;
        unsigned int type;
        unsigned int flags = 0, group;
-       u32 mark;
        int err;
 
        if (events & (1 << IPCT_DESTROY)) {
@@ -827,9 +831,8 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
        }
 
 #ifdef CONFIG_NF_CONNTRACK_MARK
-       mark = READ_ONCE(ct->mark);
-       if ((events & (1 << IPCT_MARK) || mark) &&
-           ctnetlink_dump_mark(skb, mark) < 0)
+       if (events & (1 << IPCT_MARK) &&
+           ctnetlink_dump_mark(skb, ct) < 0)
                goto nla_put_failure;
 #endif
        nlmsg_end(skb, nlh);
@@ -2671,7 +2674,6 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
 {
        const struct nf_conntrack_zone *zone;
        struct nlattr *nest_parms;
-       u32 mark;
 
        zone = nf_ct_zone(ct);
 
@@ -2733,8 +2735,7 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
                goto nla_put_failure;
 
 #ifdef CONFIG_NF_CONNTRACK_MARK
-       mark = READ_ONCE(ct->mark);
-       if (mark && ctnetlink_dump_mark(skb, mark) < 0)
+       if (ctnetlink_dump_mark(skb, ct) < 0)
                goto nla_put_failure;
 #endif
        if (ctnetlink_dump_labels(skb, ct) < 0)
index 00b5228..0fdcdb2 100644 (file)
@@ -997,13 +997,13 @@ static void flow_offload_queue_work(struct flow_offload_work *offload)
        struct net *net = read_pnet(&offload->flowtable->net);
 
        if (offload->cmd == FLOW_CLS_REPLACE) {
-               NF_FLOW_TABLE_STAT_INC(net, count_wq_add);
+               NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_wq_add);
                queue_work(nf_flow_offload_add_wq, &offload->work);
        } else if (offload->cmd == FLOW_CLS_DESTROY) {
-               NF_FLOW_TABLE_STAT_INC(net, count_wq_del);
+               NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_wq_del);
                queue_work(nf_flow_offload_del_wq, &offload->work);
        } else {
-               NF_FLOW_TABLE_STAT_INC(net, count_wq_stats);
+               NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_wq_stats);
                queue_work(nf_flow_offload_stats_wq, &offload->work);
        }
 }
index 4f9299b..06d46d1 100644 (file)
@@ -1162,6 +1162,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
        struct nft_pipapo_match *m = priv->clone;
        u8 genmask = nft_genmask_next(net);
        struct nft_pipapo_field *f;
+       const u8 *start_p, *end_p;
        int i, bsize_max, err = 0;
 
        if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END))
@@ -1202,9 +1203,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
        }
 
        /* Validate */
+       start_p = start;
+       end_p = end;
        nft_pipapo_for_each_field(f, i, m) {
-               const u8 *start_p = start, *end_p = end;
-
                if (f->rules >= (unsigned long)NFT_PIPAPO_RULE0_MAX)
                        return -ENOSPC;
 
index 282c510..994a0a1 100644 (file)
@@ -240,6 +240,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
                target->sens_res = nfca_poll->sens_res;
                target->sel_res = nfca_poll->sel_res;
                target->nfcid1_len = nfca_poll->nfcid1_len;
+               if (target->nfcid1_len > ARRAY_SIZE(target->nfcid1))
+                       return -EPROTO;
                if (target->nfcid1_len > 0) {
                        memcpy(target->nfcid1, nfca_poll->nfcid1,
                               target->nfcid1_len);
@@ -248,6 +250,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
                nfcb_poll = (struct rf_tech_specific_params_nfcb_poll *)params;
 
                target->sensb_res_len = nfcb_poll->sensb_res_len;
+               if (target->sensb_res_len > ARRAY_SIZE(target->sensb_res))
+                       return -EPROTO;
                if (target->sensb_res_len > 0) {
                        memcpy(target->sensb_res, nfcb_poll->sensb_res,
                               target->sensb_res_len);
@@ -256,6 +260,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
                nfcf_poll = (struct rf_tech_specific_params_nfcf_poll *)params;
 
                target->sensf_res_len = nfcf_poll->sensf_res_len;
+               if (target->sensf_res_len > ARRAY_SIZE(target->sensf_res))
+                       return -EPROTO;
                if (target->sensf_res_len > 0) {
                        memcpy(target->sensf_res, nfcf_poll->sensf_res,
                               target->sensf_res_len);
index e260c0d..b3ce248 100644 (file)
@@ -2224,7 +2224,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
        if (tipc_own_addr(l->net) > msg_prevnode(hdr))
                l->net_plane = msg_net_plane(hdr);
 
-       skb_linearize(skb);
+       if (skb_linearize(skb))
+               goto exit;
+
        hdr = buf_msg(skb);
        data = msg_data(hdr);
 
index b48d97c..49ddc48 100644 (file)
@@ -1689,6 +1689,7 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
        struct tipc_node *n;
        struct sk_buff_head xmitq;
        bool node_up = false;
+       struct net *peer_net;
        int bearer_id;
        int rc;
 
@@ -1705,18 +1706,23 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
                return -EHOSTUNREACH;
        }
 
+       rcu_read_lock();
        tipc_node_read_lock(n);
        node_up = node_is_up(n);
-       if (node_up && n->peer_net && check_net(n->peer_net)) {
+       peer_net = n->peer_net;
+       tipc_node_read_unlock(n);
+       if (node_up && peer_net && check_net(peer_net)) {
                /* xmit inner linux container */
-               tipc_lxc_xmit(n->peer_net, list);
+               tipc_lxc_xmit(peer_net, list);
                if (likely(skb_queue_empty(list))) {
-                       tipc_node_read_unlock(n);
+                       rcu_read_unlock();
                        tipc_node_put(n);
                        return 0;
                }
        }
+       rcu_read_unlock();
 
+       tipc_node_read_lock(n);
        bearer_id = n->active_links[selector & 1];
        if (unlikely(bearer_id == INVALID_BEARER_ID)) {
                tipc_node_read_unlock(n);
index 105f522..616b55c 100644 (file)
@@ -114,14 +114,16 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
        return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
 }
 
-static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb)
+static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb,
+                           struct user_namespace *user_ns)
 {
-       uid_t uid = from_kuid_munged(sk_user_ns(nlskb->sk), sock_i_uid(sk));
+       uid_t uid = from_kuid_munged(user_ns, sock_i_uid(sk));
        return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid);
 }
 
 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
-               u32 portid, u32 seq, u32 flags, int sk_ino)
+                       struct user_namespace *user_ns,
+                       u32 portid, u32 seq, u32 flags, int sk_ino)
 {
        struct nlmsghdr *nlh;
        struct unix_diag_msg *rep;
@@ -167,7 +169,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
                goto out_nlmsg_trim;
 
        if ((req->udiag_show & UDIAG_SHOW_UID) &&
-           sk_diag_dump_uid(sk, skb))
+           sk_diag_dump_uid(sk, skb, user_ns))
                goto out_nlmsg_trim;
 
        nlmsg_end(skb, nlh);
@@ -179,7 +181,8 @@ out_nlmsg_trim:
 }
 
 static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
-               u32 portid, u32 seq, u32 flags)
+                       struct user_namespace *user_ns,
+                       u32 portid, u32 seq, u32 flags)
 {
        int sk_ino;
 
@@ -190,7 +193,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
        if (!sk_ino)
                return 0;
 
-       return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
+       return sk_diag_fill(sk, skb, req, user_ns, portid, seq, flags, sk_ino);
 }
 
 static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
@@ -214,7 +217,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                goto next;
                        if (!(req->udiag_states & (1 << sk->sk_state)))
                                goto next;
-                       if (sk_diag_dump(sk, skb, req,
+                       if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
                                         NETLINK_CB(cb->skb).portid,
                                         cb->nlh->nlmsg_seq,
                                         NLM_F_MULTI) < 0) {
@@ -282,7 +285,8 @@ again:
        if (!rep)
                goto out;
 
-       err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
+       err = sk_diag_fill(sk, rep, req, sk_user_ns(NETLINK_CB(in_skb).sk),
+                          NETLINK_CB(in_skb).portid,
                           nlh->nlmsg_seq, 0, req->udiag_ino);
        if (err < 0) {
                nlmsg_free(rep);
index 22b31eb..258ddc5 100644 (file)
 
 
 /*
- * Memory cgroup charging is performed using percpu batches 32 pages
+ * Memory cgroup charging is performed using percpu batches 64 pages
  * big (look at MEMCG_CHARGE_BATCH), whereas memory.stat is exact. So
  * the maximum discrepancy between charge and vmstat entries is number
- * of cpus multiplied by 32 pages.
+ * of cpus multiplied by 64 pages.
  */
-#define MAX_VMSTAT_ERROR (4096 * 32 * get_nprocs())
+#define MAX_VMSTAT_ERROR (4096 * 64 * get_nprocs())
 
 
 static int alloc_dcache(const char *cgroup, void *arg)
index 3d7adee..ff8fe93 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 bind_bhash
 cmsg_sender
+diag_uid
 fin_ack_lat
 gro
 hwtstamp_config
index 969620a..1e4b397 100644 (file)
@@ -1,3 +1,3 @@
-TEST_GEN_PROGS := test_unix_oob unix_connect
+TEST_GEN_PROGS := diag_uid test_unix_oob unix_connect
 
 include ../../lib.mk
diff --git a/tools/testing/selftests/net/af_unix/diag_uid.c b/tools/testing/selftests/net/af_unix/diag_uid.c
new file mode 100644 (file)
index 0000000..5b88f71
--- /dev/null
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#define _GNU_SOURCE
+#include <sched.h>
+
+#include <unistd.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <linux/sock_diag.h>
+#include <linux/unix_diag.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+
+#include "../../kselftest_harness.h"
+
+FIXTURE(diag_uid)
+{
+       int netlink_fd;
+       int unix_fd;
+       __u32 inode;
+       __u64 cookie;
+};
+
+FIXTURE_VARIANT(diag_uid)
+{
+       int unshare;
+       int udiag_show;
+};
+
+FIXTURE_VARIANT_ADD(diag_uid, uid)
+{
+       .unshare = 0,
+       .udiag_show = UDIAG_SHOW_UID
+};
+
+FIXTURE_VARIANT_ADD(diag_uid, uid_unshare)
+{
+       .unshare = CLONE_NEWUSER,
+       .udiag_show = UDIAG_SHOW_UID
+};
+
+FIXTURE_SETUP(diag_uid)
+{
+       struct stat file_stat;
+       socklen_t optlen;
+       int ret;
+
+       if (variant->unshare)
+               ASSERT_EQ(unshare(variant->unshare), 0);
+
+       self->netlink_fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
+       ASSERT_NE(self->netlink_fd, -1);
+
+       self->unix_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+       ASSERT_NE(self->unix_fd, -1);
+
+       ret = fstat(self->unix_fd, &file_stat);
+       ASSERT_EQ(ret, 0);
+
+       self->inode = file_stat.st_ino;
+
+       optlen = sizeof(self->cookie);
+       ret = getsockopt(self->unix_fd, SOL_SOCKET, SO_COOKIE, &self->cookie, &optlen);
+       ASSERT_EQ(ret, 0);
+}
+
+FIXTURE_TEARDOWN(diag_uid)
+{
+       close(self->netlink_fd);
+       close(self->unix_fd);
+}
+
+int send_request(struct __test_metadata *_metadata,
+                FIXTURE_DATA(diag_uid) *self,
+                const FIXTURE_VARIANT(diag_uid) *variant)
+{
+       struct {
+               struct nlmsghdr nlh;
+               struct unix_diag_req udr;
+       } req = {
+               .nlh = {
+                       .nlmsg_len = sizeof(req),
+                       .nlmsg_type = SOCK_DIAG_BY_FAMILY,
+                       .nlmsg_flags = NLM_F_REQUEST
+               },
+               .udr = {
+                       .sdiag_family = AF_UNIX,
+                       .udiag_ino = self->inode,
+                       .udiag_cookie = {
+                               (__u32)self->cookie,
+                               (__u32)(self->cookie >> 32)
+                       },
+                       .udiag_show = variant->udiag_show
+               }
+       };
+       struct sockaddr_nl nladdr = {
+               .nl_family = AF_NETLINK
+       };
+       struct iovec iov = {
+               .iov_base = &req,
+               .iov_len = sizeof(req)
+       };
+       struct msghdr msg = {
+               .msg_name = &nladdr,
+               .msg_namelen = sizeof(nladdr),
+               .msg_iov = &iov,
+               .msg_iovlen = 1
+       };
+
+       return sendmsg(self->netlink_fd, &msg, 0);
+}
+
+void render_response(struct __test_metadata *_metadata,
+                    struct unix_diag_msg *udm, __u32 len)
+{
+       unsigned int rta_len = len - NLMSG_LENGTH(sizeof(*udm));
+       struct rtattr *attr;
+       uid_t uid;
+
+       ASSERT_GT(len, sizeof(*udm));
+       ASSERT_EQ(udm->udiag_family, AF_UNIX);
+
+       attr = (struct rtattr *)(udm + 1);
+       ASSERT_NE(RTA_OK(attr, rta_len), 0);
+       ASSERT_EQ(attr->rta_type, UNIX_DIAG_UID);
+
+       uid = *(uid_t *)RTA_DATA(attr);
+       ASSERT_EQ(uid, getuid());
+}
+
+void receive_response(struct __test_metadata *_metadata,
+                     FIXTURE_DATA(diag_uid) *self)
+{
+       long buf[8192 / sizeof(long)];
+       struct sockaddr_nl nladdr = {
+               .nl_family = AF_NETLINK
+       };
+       struct iovec iov = {
+               .iov_base = buf,
+               .iov_len = sizeof(buf)
+       };
+       struct msghdr msg = {
+               .msg_name = &nladdr,
+               .msg_namelen = sizeof(nladdr),
+               .msg_iov = &iov,
+               .msg_iovlen = 1
+       };
+       struct unix_diag_req *udr;
+       struct nlmsghdr *nlh;
+       int ret;
+
+       ret = recvmsg(self->netlink_fd, &msg, 0);
+       ASSERT_GT(ret, 0);
+
+       nlh = (struct nlmsghdr *)buf;
+       ASSERT_NE(NLMSG_OK(nlh, ret), 0);
+       ASSERT_EQ(nlh->nlmsg_type, SOCK_DIAG_BY_FAMILY);
+
+       render_response(_metadata, NLMSG_DATA(nlh), nlh->nlmsg_len);
+
+       nlh = NLMSG_NEXT(nlh, ret);
+       ASSERT_EQ(NLMSG_OK(nlh, ret), 0);
+}
+
+TEST_F(diag_uid, 1)
+{
+       int ret;
+
+       ret = send_request(_metadata, self, variant);
+       ASSERT_GT(ret, 0);
+
+       receive_response(_metadata, self);
+}
+
+TEST_HARNESS_MAIN
index ead7963..bd89198 100644 (file)
@@ -43,5 +43,5 @@ CONFIG_NET_ACT_TUNNEL_KEY=m
 CONFIG_NET_ACT_MIRRED=m
 CONFIG_BAREUDP=m
 CONFIG_IPV6_IOAM6_LWTUNNEL=y
-CONFIG_CRYPTO_SM4=y
+CONFIG_CRYPTO_SM4_GENERIC=y
 CONFIG_AMT=m
index 2271a87..5637b5d 100755 (executable)
@@ -1711,13 +1711,21 @@ ipv4_del_addr_test()
 
        $IP addr add dev dummy1 172.16.104.1/24
        $IP addr add dev dummy1 172.16.104.11/24
+       $IP addr add dev dummy1 172.16.104.12/24
+       $IP addr add dev dummy1 172.16.104.13/24
        $IP addr add dev dummy2 172.16.104.1/24
        $IP addr add dev dummy2 172.16.104.11/24
+       $IP addr add dev dummy2 172.16.104.12/24
        $IP route add 172.16.105.0/24 via 172.16.104.2 src 172.16.104.11
+       $IP route add 172.16.106.0/24 dev lo src 172.16.104.12
+       $IP route add table 0 172.16.107.0/24 via 172.16.104.2 src 172.16.104.13
        $IP route add vrf red 172.16.105.0/24 via 172.16.104.2 src 172.16.104.11
+       $IP route add vrf red 172.16.106.0/24 dev lo src 172.16.104.12
        set +e
 
        # removing address from device in vrf should only remove route from vrf table
+       echo "    Regular FIB info"
+
        $IP addr del dev dummy2 172.16.104.11/24
        $IP ro ls vrf red | grep -q 172.16.105.0/24
        log_test $? 1 "Route removed from VRF when source address deleted"
@@ -1735,6 +1743,35 @@ ipv4_del_addr_test()
        $IP ro ls vrf red | grep -q 172.16.105.0/24
        log_test $? 0 "Route in VRF is not removed by address delete"
 
+       # removing address from device in vrf should only remove route from vrf
+       # table even when the associated fib info only differs in table ID
+       echo "    Identical FIB info with different table ID"
+
+       $IP addr del dev dummy2 172.16.104.12/24
+       $IP ro ls vrf red | grep -q 172.16.106.0/24
+       log_test $? 1 "Route removed from VRF when source address deleted"
+
+       $IP ro ls | grep -q 172.16.106.0/24
+       log_test $? 0 "Route in default VRF not removed"
+
+       $IP addr add dev dummy2 172.16.104.12/24
+       $IP route add vrf red 172.16.106.0/24 dev lo src 172.16.104.12
+
+       $IP addr del dev dummy1 172.16.104.12/24
+       $IP ro ls | grep -q 172.16.106.0/24
+       log_test $? 1 "Route removed in default VRF when source address deleted"
+
+       $IP ro ls vrf red | grep -q 172.16.106.0/24
+       log_test $? 0 "Route in VRF is not removed by address delete"
+
+       # removing address from device in default vrf should remove route from
+       # the default vrf even when route was inserted with a table ID of 0.
+       echo "    Table ID 0"
+
+       $IP addr del dev dummy1 172.16.104.13/24
+       $IP ro ls | grep -q 172.16.107.0/24
+       log_test $? 1 "Route removed in default VRF when source address deleted"
+
        $IP li del dummy1
        $IP li del dummy2
        cleanup
index 0900c54..275491b 100755 (executable)
@@ -782,7 +782,7 @@ kci_test_ipsec_offload()
            tmpl proto esp src $srcip dst $dstip spi 9 \
            mode transport reqid 42
        check_err $?
-       ip x p add dir out src $dstip/24 dst $srcip/24 \
+       ip x p add dir in src $dstip/24 dst $srcip/24 \
            tmpl proto esp src $dstip dst $srcip spi 9 \
            mode transport reqid 42
        check_err $?
index 0a49907..da5bfd8 100755 (executable)
@@ -32,7 +32,7 @@ DEV="eth0"
 # This is determined by reading the RSS indirection table using ethtool.
 get_rss_cfg_num_rxqs() {
        echo $(ethtool -x "${DEV}" |
-               egrep [[:space:]]+[0-9]+:[[:space:]]+ |
+               grep -E [[:space:]]+[0-9]+:[[:space:]]+ |
                cut -d: -f2- |
                awk '{$1=$1};1' |
                tr ' ' '\n' |