Merge tag 'perf-tools-fixes-for-v5.18-2022-05-14' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 14 May 2022 18:43:47 +0000 (11:43 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 14 May 2022 18:43:47 +0000 (11:43 -0700)
Pull perf tools fixes from Arnaldo Carvalho de Melo:

 - Fix two NDEBUG warnings in 'perf bench numa'

 - Fix ARM coresight `perf test` failure

 - Sync linux/kvm.h with the kernel sources

 - Add James and Mike as Arm64 performance events reviewers

* tag 'perf-tools-fixes-for-v5.18-2022-05-14' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux:
  MAINTAINERS: Add James and Mike as Arm64 performance events reviewers
  tools headers UAPI: Sync linux/kvm.h with the kernel sources
  perf tests: Fix coresight `perf test` failure.
  perf bench: Fix two numa NDEBUG warnings

133 files changed:
.mailmap
Documentation/arm64/silicon-errata.rst
MAINTAINERS
arch/arm/include/asm/io.h
arch/arm/mm/ioremap.c
arch/arm64/include/asm/io.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/vdso/Makefile
arch/arm64/kernel/vdso32/Makefile
arch/arm64/mm/ioremap.c
drivers/dma-buf/dma-buf.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/nouveau/nouveau_backlight.c
drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/hwmon/Kconfig
drivers/hwmon/tmp401.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/ocelot/felix.c
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/dec/tulip/tulip_core.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_idc.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_ptp.c
drivers/net/ethernet/intel/ice/ice_virtchnl.c
drivers/net/ethernet/mediatek/mtk_ppe.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot_flower.c
drivers/net/ethernet/mscc/ocelot_vcap.c
drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx_channels.c
drivers/net/ethernet/sfc/ptp.c
drivers/net/ethernet/sfc/ptp.h
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/wireless/ath/ath11k/core.c
drivers/net/wireless/ath/ath11k/core.h
drivers/net/wireless/ath/ath11k/mac.c
drivers/net/wireless/ath/ath11k/mac.h
drivers/net/wireless/ath/ath11k/reg.c
drivers/net/wireless/ath/ath11k/reg.h
drivers/net/wireless/ath/ath11k/wmi.c
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
drivers/net/wireless/mac80211_hwsim.c
drivers/platform/surface/aggregator/core.c
drivers/platform/surface/surface_gpe.c
drivers/platform/x86/intel/pmt/telemetry.c
drivers/platform/x86/thinkpad_acpi.c
drivers/ptp/ptp_ocp.c
drivers/s390/net/ctcm_mpc.c
drivers/s390/net/ctcm_sysfs.c
drivers/s390/net/lcs.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/qla2xxx/qla_target.c
drivers/video/fbdev/core/fbmem.c
drivers/video/fbdev/core/fbsysfs.c
drivers/video/fbdev/efifb.c
drivers/video/fbdev/simplefb.c
drivers/video/fbdev/vesafb.c
fs/ceph/addr.c
fs/ceph/file.c
fs/fs-writeback.c
fs/gfs2/bmap.c
fs/gfs2/file.c
fs/nfs/fs_context.c
fs/notify/fanotify/fanotify_user.c
fs/proc/fd.c
fs/udf/namei.c
include/linux/netdev_features.h
include/linux/sunrpc/clnt.h
include/net/bluetooth/hci_core.h
include/net/tc_act/tc_pedit.h
include/soc/mscc/ocelot_vcap.h
include/uapi/linux/rfkill.h
include/uapi/linux/virtio_ids.h
kernel/cgroup/cpuset.c
lib/dim/net_dim.c
mm/huge_memory.c
mm/kfence/core.c
mm/memory-failure.c
mm/mremap.c
net/batman-adv/fragmentation.c
net/bluetooth/hci_core.c
net/core/skbuff.c
net/decnet/dn_dev.c
net/decnet/dn_neigh.c
net/decnet/dn_route.c
net/dsa/port.c
net/ipv4/ping.c
net/ipv4/route.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/netlink/af_netlink.c
net/rds/tcp.c
net/rds/tcp.h
net/rds/tcp_connect.c
net/rds/tcp_listen.c
net/sched/act_pedit.c
net/smc/smc_rx.c
net/sunrpc/auth_gss/gss_rpc_upcall.c
net/sunrpc/clnt.c
net/tls/tls_device.c
net/wireless/nl80211.c
net/wireless/scan.c
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/bpf/Makefile [new file with mode: 0644]
tools/testing/selftests/net/bpf/nat6to4.c [new file with mode: 0644]
tools/testing/selftests/net/fcnal-test.sh
tools/testing/selftests/net/udpgro_frglist.sh [new file with mode: 0755]
tools/testing/selftests/vm/Makefile

index ea1ba4a..0b04aa2 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -205,6 +205,7 @@ Juha Yrjola <at solidboot.com>
 Juha Yrjola <juha.yrjola@nokia.com>
 Juha Yrjola <juha.yrjola@solidboot.com>
 Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com>
+Kalle Valo <kvalo@kernel.org> <kvalo@codeaurora.org>
 Kalyan Thota <quic_kalyant@quicinc.com> <kalyan_t@codeaurora.org>
 Kay Sievers <kay.sievers@vrfy.org>
 Kees Cook <keescook@chromium.org> <kees.cook@canonical.com>
@@ -250,6 +251,7 @@ Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@puri.sm>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
+Martyna Szapar-Mudlaw <martyna.szapar-mudlaw@linux.intel.com> <martyna.szapar-mudlaw@intel.com>
 Mathieu Othacehe <m.othacehe@gmail.com>
 Matthew Wilcox <willy@infradead.org> <matthew.r.wilcox@intel.com>
 Matthew Wilcox <willy@infradead.org> <matthew@wil.cx>
index 466cb9e..d27db84 100644 (file)
@@ -189,6 +189,9 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Qualcomm Tech. | Kryo4xx Silver  | N/A             | ARM64_ERRATUM_1024718       |
 +----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Gold    | N/A             | ARM64_ERRATUM_1286807       |
++----------------+-----------------+-----------------+-----------------------------+
+
 +----------------+-----------------+-----------------+-----------------------------+
 | Fujitsu        | A64FX           | E#010001        | FUJITSU_ERRATUM_010001      |
 +----------------+-----------------+-----------------+-----------------------------+
index 2da9d14..d6d879c 100644 (file)
@@ -3571,8 +3571,9 @@ M:        Andy Gospodarek <andy@greyhouse.net>
 L:     netdev@vger.kernel.org
 S:     Supported
 W:     http://sourceforge.net/projects/bonding/
+F:     Documentation/networking/bonding.rst
 F:     drivers/net/bonding/
-F:     include/net/bonding.h
+F:     include/net/bond*
 F:     include/uapi/linux/if_bonding.h
 
 BOSCH SENSORTEC BMA400 ACCELEROMETER IIO DRIVER
@@ -5439,6 +5440,7 @@ F:        net/ax25/sysctl_net_ax25.c
 
 DATA ACCESS MONITOR
 M:     SeongJae Park <sj@kernel.org>
+L:     damon@lists.linux.dev
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     Documentation/ABI/testing/sysfs-kernel-mm-damon
@@ -10131,7 +10133,7 @@ S:      Supported
 F:     drivers/net/wireless/intel/iwlegacy/
 
 INTEL WIRELESS WIFI LINK (iwlwifi)
-M:     Luca Coelho <luciano.coelho@intel.com>
+M:     Gregory Greenman <gregory.greenman@intel.com>
 L:     linux-wireless@vger.kernel.org
 S:     Supported
 W:     https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi
index 0c70eb6..2a0739a 100644 (file)
@@ -440,6 +440,9 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
 extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
 extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+                                       unsigned long flags);
+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
 #endif
 
 /*
index aa08bcb..2907023 100644 (file)
@@ -493,3 +493,11 @@ void __init early_ioremap_init(void)
 {
        early_ioremap_setup();
 }
+
+bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+                                unsigned long flags)
+{
+       unsigned long pfn = PHYS_PFN(offset);
+
+       return memblock_is_map_memory(pfn);
+}
index 7fd836b..3995652 100644 (file)
@@ -192,4 +192,8 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
 extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
 
+extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+                                       unsigned long flags);
+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
+
 #endif /* __ASM_IO_H */
index 986837d..fa7981d 100644 (file)
@@ -75,6 +75,10 @@ obj-$(CONFIG_ARM64_MTE)                      += mte.o
 obj-y                                  += vdso-wrap.o
 obj-$(CONFIG_COMPAT_VDSO)              += vdso32-wrap.o
 
+# Force dependency (vdso*-wrap.S includes vdso.so through incbin)
+$(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so
+$(obj)/vdso32-wrap.o: $(obj)/vdso32/vdso.so
+
 obj-y                                  += probes/
 head-y                                 := head.o
 extra-y                                        += $(head-y) vmlinux.lds
index 4c9b5b4..a0f3d0a 100644 (file)
@@ -208,6 +208,8 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
 #ifdef CONFIG_ARM64_ERRATUM_1286807
        {
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+               /* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
+               ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
        },
 #endif
        {},
index d72c4b4..2cb9cc9 100644 (file)
@@ -654,7 +654,6 @@ static const struct __ftr_reg_entry {
        ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
        ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1,
                               &id_aa64isar1_override),
-       ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2),
        ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2,
                               &id_aa64isar2_override),
 
@@ -810,7 +809,7 @@ static void __init sort_ftr_regs(void)
                 * to sys_id for subsequent binary search in get_arm64_ftr_reg()
                 * to work correctly.
                 */
-               BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
+               BUG_ON(arm64_ftr_regs[i].sys_id <= arm64_ftr_regs[i - 1].sys_id);
        }
 }
 
index 172452f..ac1964e 100644 (file)
@@ -52,9 +52,6 @@ GCOV_PROFILE := n
 targets += vdso.lds
 CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
 
-# Force dependency (incbin is bad)
-$(obj)/vdso.o : $(obj)/vdso.so
-
 # Link rule for the .so file, .lds has to be first
 $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
        $(call if_changed,vdsold_and_vdso_check)
index ed181be..05ba1aa 100644 (file)
@@ -131,9 +131,6 @@ obj-vdso := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso)
 targets += vdso.lds
 CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
 
-# Force dependency (vdso.s includes vdso.so through incbin)
-$(obj)/vdso.o: $(obj)/vdso.so
-
 include/generated/vdso32-offsets.h: $(obj)/vdso.so.dbg FORCE
        $(call if_changed,vdsosym)
 
index b7c81da..b21f91c 100644 (file)
@@ -99,3 +99,11 @@ void __init early_ioremap_init(void)
 {
        early_ioremap_setup();
 }
+
+bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+                                unsigned long flags)
+{
+       unsigned long pfn = PHYS_PFN(offset);
+
+       return pfn_is_map_memory(pfn);
+}
index df23239..b1e25ae 100644 (file)
@@ -543,10 +543,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
        file->f_mode |= FMODE_LSEEK;
        dmabuf->file = file;
 
-       ret = dma_buf_stats_setup(dmabuf);
-       if (ret)
-               goto err_sysfs;
-
        mutex_init(&dmabuf->lock);
        INIT_LIST_HEAD(&dmabuf->attachments);
 
@@ -554,6 +550,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
        list_add(&dmabuf->list_node, &db_list.head);
        mutex_unlock(&db_list.lock);
 
+       ret = dma_buf_stats_setup(dmabuf);
+       if (ret)
+               goto err_sysfs;
+
        return dmabuf;
 
 err_sysfs:
index 8f0e6d9..c317078 100644 (file)
@@ -296,6 +296,7 @@ static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
 {
        struct amdgpu_device *adev = ctx->adev;
        enum amd_dpm_forced_level level;
+       u32 current_stable_pstate;
        int r;
 
        mutex_lock(&adev->pm.stable_pstate_ctx_lock);
@@ -304,6 +305,10 @@ static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
                goto done;
        }
 
+       r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
+       if (r || (stable_pstate == current_stable_pstate))
+               goto done;
+
        switch (stable_pstate) {
        case AMDGPU_CTX_STABLE_PSTATE_NONE:
                level = AMD_DPM_FORCED_LEVEL_AUTO;
index 039b90c..45f0188 100644 (file)
 #include "mxgpu_vi.h"
 #include "amdgpu_dm.h"
 
+#if IS_ENABLED(CONFIG_X86)
+#include <asm/intel-family.h>
+#endif
+
 #define ixPCIE_LC_L1_PM_SUBSTATE       0x100100C6
 #define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK       0x00000001L
 #define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK   0x00000002L
@@ -1134,13 +1138,24 @@ static void vi_enable_aspm(struct amdgpu_device *adev)
                WREG32_PCIE(ixPCIE_LC_CNTL, data);
 }
 
+static bool aspm_support_quirk_check(void)
+{
+#if IS_ENABLED(CONFIG_X86)
+       struct cpuinfo_x86 *c = &cpu_data(0);
+
+       return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
+#else
+       return true;
+#endif
+}
+
 static void vi_program_aspm(struct amdgpu_device *adev)
 {
        u32 data, data1, orig;
        bool bL1SS = false;
        bool bClkReqSupport = true;
 
-       if (!amdgpu_device_should_use_aspm(adev))
+       if (!amdgpu_device_should_use_aspm(adev) || !aspm_support_quirk_check())
                return;
 
        if (adev->flags & AMD_IS_APU ||
index d7559e5..e708f07 100644 (file)
@@ -153,9 +153,4 @@ void dcn31_hw_sequencer_construct(struct dc *dc)
                dc->hwss.init_hw = dcn20_fpga_init_hw;
                dc->hwseq->funcs.init_pipes = NULL;
        }
-       if (dc->debug.disable_z10) {
-               /*hw not support z10 or sw disable it*/
-               dc->hwss.z10_restore = NULL;
-               dc->hwss.z10_save_init = NULL;
-       }
 }
index f154475..f10a025 100644 (file)
@@ -1351,14 +1351,8 @@ static int smu_disable_dpms(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;
-       /*
-        * TODO: (adev->in_suspend && !adev->in_s0ix) is added to pair
-        * the workaround which always reset the asic in suspend.
-        * It's likely that workaround will be dropped in the future.
-        * Then the change here should be dropped together.
-        */
        bool use_baco = !smu->is_apu &&
-               (((amdgpu_in_reset(adev) || (adev->in_suspend && !adev->in_s0ix)) &&
+               ((amdgpu_in_reset(adev) &&
                  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
                 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
 
index 94fcdb7..eeaa8d0 100644 (file)
@@ -1605,17 +1605,17 @@ void i915_vma_close(struct i915_vma *vma)
 
 static void __i915_vma_remove_closed(struct i915_vma *vma)
 {
-       struct intel_gt *gt = vma->vm->gt;
-
-       spin_lock_irq(&gt->closed_lock);
        list_del_init(&vma->closed_link);
-       spin_unlock_irq(&gt->closed_lock);
 }
 
 void i915_vma_reopen(struct i915_vma *vma)
 {
+       struct intel_gt *gt = vma->vm->gt;
+
+       spin_lock_irq(&gt->closed_lock);
        if (i915_vma_is_closed(vma))
                __i915_vma_remove_closed(vma);
+       spin_unlock_irq(&gt->closed_lock);
 }
 
 void i915_vma_release(struct kref *ref)
@@ -1641,6 +1641,7 @@ static void force_unbind(struct i915_vma *vma)
 static void release_references(struct i915_vma *vma)
 {
        struct drm_i915_gem_object *obj = vma->obj;
+       struct intel_gt *gt = vma->vm->gt;
 
        GEM_BUG_ON(i915_vma_is_active(vma));
 
@@ -1650,7 +1651,9 @@ static void release_references(struct i915_vma *vma)
                rb_erase(&vma->obj_node, &obj->vma.tree);
        spin_unlock(&obj->vma.lock);
 
+       spin_lock_irq(&gt->closed_lock);
        __i915_vma_remove_closed(vma);
+       spin_unlock_irq(&gt->closed_lock);
 
        __i915_vma_put(vma);
 }
index daf9f87..a2141d3 100644 (file)
@@ -46,8 +46,9 @@ static bool
 nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE],
                           struct nouveau_backlight *bl)
 {
-       const int nb = ida_simple_get(&bl_ida, 0, 0, GFP_KERNEL);
-       if (nb < 0 || nb >= 100)
+       const int nb = ida_alloc_max(&bl_ida, 99, GFP_KERNEL);
+
+       if (nb < 0)
                return false;
        if (nb > 0)
                snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight%d", nb);
@@ -414,7 +415,7 @@ nouveau_backlight_init(struct drm_connector *connector)
                                            nv_encoder, ops, &props);
        if (IS_ERR(bl->dev)) {
                if (bl->id >= 0)
-                       ida_simple_remove(&bl_ida, bl->id);
+                       ida_free(&bl_ida, bl->id);
                ret = PTR_ERR(bl->dev);
                goto fail_alloc;
        }
@@ -442,7 +443,7 @@ nouveau_backlight_fini(struct drm_connector *connector)
                return;
 
        if (bl->id >= 0)
-               ida_simple_remove(&bl_ida, bl->id);
+               ida_free(&bl_ida, bl->id);
 
        backlight_device_unregister(bl->dev);
        nv_conn->backlight = NULL;
index 992cc28..2ed528c 100644 (file)
@@ -123,7 +123,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
 
        mutex_init(&tdev->iommu.mutex);
 
-       if (iommu_present(&platform_bus_type)) {
+       if (device_iommu_mapped(dev)) {
                tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
                if (!tdev->iommu.domain)
                        goto error;
index 6c58b0f..98b78ec 100644 (file)
@@ -38,6 +38,7 @@
 #include <drm/drm_scdc_helper.h>
 #include <linux/clk.h>
 #include <linux/component.h>
+#include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 #include <linux/of_address.h>
 #include <linux/of_gpio.h>
index a3bfbb6..162dfeb 100644 (file)
@@ -528,7 +528,7 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
                *seqno = atomic_add_return(1, &dev_priv->marker_seq);
        } while (*seqno == 0);
 
-       if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE)) {
+       if (!vmw_has_fences(dev_priv)) {
 
                /*
                 * Don't request hardware to send a fence. The
@@ -675,11 +675,14 @@ int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
  */
 bool vmw_cmd_supported(struct vmw_private *vmw)
 {
-       if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
-                                 SVGA_CAP_CMD_BUFFERS_2)) != 0)
-               return true;
+       bool has_cmdbufs =
+               (vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
+                                     SVGA_CAP_CMD_BUFFERS_2)) != 0;
+       if (vmw_is_svga_v3(vmw))
+               return (has_cmdbufs &&
+                       (vmw->capabilities & SVGA_CAP_GBOBJECTS) != 0);
        /*
         * We have FIFO cmd's
         */
-       return vmw->fifo_mem != NULL;
+       return has_cmdbufs || vmw->fifo_mem != NULL;
 }
index ea3ecdd..6de0b9e 100644 (file)
@@ -1679,4 +1679,12 @@ static inline void vmw_irq_status_write(struct vmw_private *vmw,
                outl(status, vmw->io_start + SVGA_IRQSTATUS_PORT);
 }
 
+static inline bool vmw_has_fences(struct vmw_private *vmw)
+{
+       if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
+                                 SVGA_CAP_CMD_BUFFERS_2)) != 0)
+               return true;
+       return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
+}
+
 #endif
index 8ee3457..adf17c7 100644 (file)
@@ -483,7 +483,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
 
 static int vmw_fb_kms_framebuffer(struct fb_info *info)
 {
-       struct drm_mode_fb_cmd2 mode_cmd;
+       struct drm_mode_fb_cmd2 mode_cmd = {0};
        struct vmw_fb_par *par = info->par;
        struct fb_var_screeninfo *var = &info->var;
        struct drm_framebuffer *cur_fb;
index 59d6a2d..66cc35d 100644 (file)
@@ -82,6 +82,22 @@ fman_from_fence(struct vmw_fence_obj *fence)
        return container_of(fence->base.lock, struct vmw_fence_manager, lock);
 }
 
+static u32 vmw_fence_goal_read(struct vmw_private *vmw)
+{
+       if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+               return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
+       else
+               return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
+}
+
+static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
+{
+       if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+               vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
+       else
+               vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
+}
+
 /*
  * Note on fencing subsystem usage of irqs:
  * Typically the vmw_fences_update function is called
@@ -392,7 +408,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
        if (likely(!fman->seqno_valid))
                return false;
 
-       goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
+       goal_seqno = vmw_fence_goal_read(fman->dev_priv);
        if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
                return false;
 
@@ -400,9 +416,8 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
        list_for_each_entry(fence, &fman->fence_list, head) {
                if (!list_empty(&fence->seq_passed_actions)) {
                        fman->seqno_valid = true;
-                       vmw_fifo_mem_write(fman->dev_priv,
-                                          SVGA_FIFO_FENCE_GOAL,
-                                          fence->base.seqno);
+                       vmw_fence_goal_write(fman->dev_priv,
+                                            fence->base.seqno);
                        break;
                }
        }
@@ -434,13 +449,12 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
        if (dma_fence_is_signaled_locked(&fence->base))
                return false;
 
-       goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
+       goal_seqno = vmw_fence_goal_read(fman->dev_priv);
        if (likely(fman->seqno_valid &&
                   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
                return false;
 
-       vmw_fifo_mem_write(fman->dev_priv, SVGA_FIFO_FENCE_GOAL,
-                          fence->base.seqno);
+       vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
        fman->seqno_valid = true;
 
        return true;
index c5191de..fe4732b 100644 (file)
 
 #define VMW_FENCE_WRAP (1 << 24)
 
+static u32 vmw_irqflag_fence_goal(struct vmw_private *vmw)
+{
+       if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+               return SVGA_IRQFLAG_REG_FENCE_GOAL;
+       else
+               return SVGA_IRQFLAG_FENCE_GOAL;
+}
+
 /**
  * vmw_thread_fn - Deferred (process context) irq handler
  *
@@ -96,7 +104,7 @@ static irqreturn_t vmw_irq_handler(int irq, void *arg)
                wake_up_all(&dev_priv->fifo_queue);
 
        if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
-                             SVGA_IRQFLAG_FENCE_GOAL)) &&
+                             vmw_irqflag_fence_goal(dev_priv))) &&
            !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
                ret = IRQ_WAKE_THREAD;
 
@@ -137,8 +145,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return true;
 
-       if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE) &&
-           vmw_fifo_idle(dev_priv, seqno))
+       if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
                return true;
 
        /**
@@ -160,6 +167,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
                      unsigned long timeout)
 {
        struct vmw_fifo_state *fifo_state = dev_priv->fifo;
+       bool fifo_down = false;
 
        uint32_t count = 0;
        uint32_t signal_seq;
@@ -176,12 +184,14 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
         */
 
        if (fifo_idle) {
-               down_read(&fifo_state->rwsem);
                if (dev_priv->cman) {
                        ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
                                              10*HZ);
                        if (ret)
                                goto out_err;
+               } else if (fifo_state) {
+                       down_read(&fifo_state->rwsem);
+                       fifo_down = true;
                }
        }
 
@@ -218,12 +228,12 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
                }
        }
        finish_wait(&dev_priv->fence_queue, &__wait);
-       if (ret == 0 && fifo_idle)
+       if (ret == 0 && fifo_idle && fifo_state)
                vmw_fence_write(dev_priv, signal_seq);
 
        wake_up_all(&dev_priv->fence_queue);
 out_err:
-       if (fifo_idle)
+       if (fifo_down)
                up_read(&fifo_state->rwsem);
 
        return ret;
@@ -266,13 +276,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
 
 void vmw_goal_waiter_add(struct vmw_private *dev_priv)
 {
-       vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
+       vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
                               &dev_priv->goal_queue_waiters);
 }
 
 void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
 {
-       vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
+       vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
                                  &dev_priv->goal_queue_waiters);
 }
 
index bbd2f4e..93431e8 100644 (file)
@@ -1344,7 +1344,6 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
                ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
                                                      mode_cmd,
                                                      is_bo_proxy);
-
                /*
                 * vmw_create_bo_proxy() adds a reference that is no longer
                 * needed
@@ -1385,13 +1384,16 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
        ret = vmw_user_lookup_handle(dev_priv, file_priv,
                                     mode_cmd->handles[0],
                                     &surface, &bo);
-       if (ret)
+       if (ret) {
+               DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
+                         mode_cmd->handles[0], mode_cmd->handles[0]);
                goto err_out;
+       }
 
 
        if (!bo &&
            !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
-               DRM_ERROR("Surface size cannot exceed %dx%d",
+               DRM_ERROR("Surface size cannot exceed %dx%d\n",
                        dev_priv->texture_max_width,
                        dev_priv->texture_max_height);
                goto err_out;
index 68a8a27..f2b038f 100644 (file)
@@ -960,7 +960,7 @@ config SENSORS_LTC4261
 
 config SENSORS_LTQ_CPUTEMP
        bool "Lantiq cpu temperature sensor driver"
-       depends on LANTIQ
+       depends on SOC_XWAY
        help
          If you say yes here you get support for the temperature
          sensor inside your CPU.
index b86d9df..52c9e7d 100644 (file)
@@ -708,10 +708,21 @@ static int tmp401_probe(struct i2c_client *client)
        return 0;
 }
 
+static const struct of_device_id __maybe_unused tmp4xx_of_match[] = {
+       { .compatible = "ti,tmp401", },
+       { .compatible = "ti,tmp411", },
+       { .compatible = "ti,tmp431", },
+       { .compatible = "ti,tmp432", },
+       { .compatible = "ti,tmp435", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, tmp4xx_of_match);
+
 static struct i2c_driver tmp401_driver = {
        .class          = I2C_CLASS_HWMON,
        .driver = {
                .name   = "tmp401",
+               .of_match_table = of_match_ptr(tmp4xx_of_match),
        },
        .probe_new      = tmp401_probe,
        .id_table       = tmp401_id,
index cf82b1f..87e81c6 100644 (file)
@@ -809,6 +809,9 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
        u32 reg, offset;
 
+       if (priv->wol_ports_mask & BIT(port))
+               return;
+
        if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
                if (priv->type == BCM4908_DEVICE_ID ||
                    priv->type == BCM7445_DEVICE_ID)
index 9e28219..faccfb3 100644 (file)
@@ -403,6 +403,7 @@ static int felix_update_trapping_destinations(struct dsa_switch *ds,
 {
        struct ocelot *ocelot = ds->priv;
        struct felix *felix = ocelot_to_felix(ocelot);
+       struct ocelot_vcap_block *block_vcap_is2;
        struct ocelot_vcap_filter *trap;
        enum ocelot_mask_mode mask_mode;
        unsigned long port_mask;
@@ -422,9 +423,13 @@ static int felix_update_trapping_destinations(struct dsa_switch *ds,
        /* We are sure that "cpu" was found, otherwise
         * dsa_tree_setup_default_cpu() would have failed earlier.
         */
+       block_vcap_is2 = &ocelot->block[VCAP_IS2];
 
        /* Make sure all traps are set up for that destination */
-       list_for_each_entry(trap, &ocelot->traps, trap_list) {
+       list_for_each_entry(trap, &block_vcap_is2->rules, list) {
+               if (!trap->is_trap)
+                       continue;
+
                /* Figure out the current trapping destination */
                if (using_tag_8021q) {
                        /* Redirect to the tag_8021q CPU port. If timestamps
index 3a529ee..8318339 100644 (file)
@@ -449,7 +449,7 @@ static int aq_pm_freeze(struct device *dev)
 
 static int aq_pm_suspend_poweroff(struct device *dev)
 {
-       return aq_suspend_common(dev, false);
+       return aq_suspend_common(dev, true);
 }
 
 static int aq_pm_thaw(struct device *dev)
@@ -459,7 +459,7 @@ static int aq_pm_thaw(struct device *dev)
 
 static int aq_pm_resume_restore(struct device *dev)
 {
-       return atl_resume_common(dev, false);
+       return atl_resume_common(dev, true);
 }
 
 static const struct dev_pm_ops aq_pm_ops = {
index 77e76c9..8201ce7 100644 (file)
@@ -346,7 +346,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                     int budget)
 {
        struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
-       bool is_rsc_completed = true;
        int err = 0;
 
        for (; (self->sw_head != self->hw_head) && budget;
@@ -364,12 +363,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                        continue;
 
                if (!buff->is_eop) {
+                       unsigned int frag_cnt = 0U;
                        buff_ = buff;
                        do {
+                               bool is_rsc_completed = true;
+
                                if (buff_->next >= self->size) {
                                        err = -EIO;
                                        goto err_exit;
                                }
+
+                               frag_cnt++;
                                next_ = buff_->next,
                                buff_ = &self->buff_ring[next_];
                                is_rsc_completed =
@@ -377,18 +381,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                                                            next_,
                                                            self->hw_head);
 
-                               if (unlikely(!is_rsc_completed))
-                                       break;
+                               if (unlikely(!is_rsc_completed) ||
+                                               frag_cnt > MAX_SKB_FRAGS) {
+                                       err = 0;
+                                       goto err_exit;
+                               }
 
                                buff->is_error |= buff_->is_error;
                                buff->is_cso_err |= buff_->is_cso_err;
 
                        } while (!buff_->is_eop);
 
-                       if (!is_rsc_completed) {
-                               err = 0;
-                               goto err_exit;
-                       }
                        if (buff->is_error ||
                            (buff->is_lro && buff->is_cso_err)) {
                                buff_ = buff;
@@ -446,7 +449,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                       ALIGN(hdr_len, sizeof(long)));
 
                if (buff->len - hdr_len > 0) {
-                       skb_add_rx_frag(skb, 0, buff->rxdata.page,
+                       skb_add_rx_frag(skb, i++, buff->rxdata.page,
                                        buff->rxdata.pg_off + hdr_len,
                                        buff->len - hdr_len,
                                        AQ_CFG_RX_FRAME_MAX);
@@ -455,7 +458,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 
                if (!buff->is_eop) {
                        buff_ = buff;
-                       i = 1U;
                        do {
                                next_ = buff_->next;
                                buff_ = &self->buff_ring[next_];
index d875ce3..15ede72 100644 (file)
@@ -889,6 +889,13 @@ int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
                err = -ENXIO;
                goto err_exit;
        }
+
+       /* Validate that the new hw_head_ is reasonable. */
+       if (hw_head_ >= ring->size) {
+               err = -ENXIO;
+               goto err_exit;
+       }
+
        ring->hw_head = hw_head_;
        err = aq_hw_err_from_flags(self);
 
index bf1ec8f..e87e46c 100644 (file)
@@ -3999,6 +3999,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
                goto err;
        }
        priv->wol_irq = platform_get_irq_optional(pdev, 2);
+       if (priv->wol_irq == -EPROBE_DEFER) {
+               err = priv->wol_irq;
+               goto err;
+       }
 
        priv->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(priv->base)) {
index e7b4e3e..8d719f8 100644 (file)
@@ -2793,14 +2793,14 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
                goto out;
        na = ret;
 
-       memcpy(p->id, vpd + id, min_t(int, id_len, ID_LEN));
+       memcpy(p->id, vpd + id, min_t(unsigned int, id_len, ID_LEN));
        strim(p->id);
-       memcpy(p->sn, vpd + sn, min_t(int, sn_len, SERNUM_LEN));
+       memcpy(p->sn, vpd + sn, min_t(unsigned int, sn_len, SERNUM_LEN));
        strim(p->sn);
-       memcpy(p->pn, vpd + pn, min_t(int, pn_len, PN_LEN));
+       memcpy(p->pn, vpd + pn, min_t(unsigned int, pn_len, PN_LEN));
        strim(p->pn);
-       memcpy(p->na, vpd + na, min_t(int, na_len, MACADDR_LEN));
-       strim((char *)p->na);
+       memcpy(p->na, vpd + na, min_t(unsigned int, na_len, MACADDR_LEN));
+       strim(p->na);
 
 out:
        vfree(vpd);
index 79df5a7..0040dca 100644 (file)
@@ -1399,8 +1399,10 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* alloc_etherdev ensures aligned and zeroed private structures */
        dev = alloc_etherdev (sizeof (*tp));
-       if (!dev)
+       if (!dev) {
+               pci_disable_device(pdev);
                return -ENOMEM;
+       }
 
        SET_NETDEV_DEV(dev, &pdev->dev);
        if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
@@ -1785,6 +1787,7 @@ err_out_free_res:
 
 err_out_free_netdev:
        free_netdev (dev);
+       pci_disable_device(pdev);
        return -ENODEV;
 }
 
index 6778df2..98871f0 100644 (file)
@@ -7549,42 +7549,43 @@ static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
 static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
                            struct i40e_fwd_adapter *fwd)
 {
+       struct i40e_channel *ch = NULL, *ch_tmp, *iter;
        int ret = 0, num_tc = 1,  i, aq_err;
-       struct i40e_channel *ch, *ch_tmp;
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
 
-       if (list_empty(&vsi->macvlan_list))
-               return -EINVAL;
-
        /* Go through the list and find an available channel */
-       list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
-               if (!i40e_is_channel_macvlan(ch)) {
-                       ch->fwd = fwd;
+       list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
+               if (!i40e_is_channel_macvlan(iter)) {
+                       iter->fwd = fwd;
                        /* record configuration for macvlan interface in vdev */
                        for (i = 0; i < num_tc; i++)
                                netdev_bind_sb_channel_queue(vsi->netdev, vdev,
                                                             i,
-                                                            ch->num_queue_pairs,
-                                                            ch->base_queue);
-                       for (i = 0; i < ch->num_queue_pairs; i++) {
+                                                            iter->num_queue_pairs,
+                                                            iter->base_queue);
+                       for (i = 0; i < iter->num_queue_pairs; i++) {
                                struct i40e_ring *tx_ring, *rx_ring;
                                u16 pf_q;
 
-                               pf_q = ch->base_queue + i;
+                               pf_q = iter->base_queue + i;
 
                                /* Get to TX ring ptr */
                                tx_ring = vsi->tx_rings[pf_q];
-                               tx_ring->ch = ch;
+                               tx_ring->ch = iter;
 
                                /* Get the RX ring ptr */
                                rx_ring = vsi->rx_rings[pf_q];
-                               rx_ring->ch = ch;
+                               rx_ring->ch = iter;
                        }
+                       ch = iter;
                        break;
                }
        }
 
+       if (!ch)
+               return -EINVAL;
+
        /* Guarantee all rings are updated before we update the
         * MAC address filter.
         */
index 8ed3c9a..a895e3a 100644 (file)
@@ -540,6 +540,7 @@ struct ice_pf {
        struct mutex avail_q_mutex;     /* protects access to avail_[rx|tx]qs */
        struct mutex sw_mutex;          /* lock for protecting VSI alloc flow */
        struct mutex tc_mutex;          /* lock to protect TC changes */
+       struct mutex adev_mutex;        /* lock to protect aux device access */
        u32 msg_enable;
        struct ice_ptp ptp;
        struct tty_driver *ice_gnss_tty_driver;
index 25a436d..3e3b2ed 100644 (file)
@@ -37,14 +37,17 @@ void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
        if (WARN_ON_ONCE(!in_task()))
                return;
 
+       mutex_lock(&pf->adev_mutex);
        if (!pf->adev)
-               return;
+               goto finish;
 
        device_lock(&pf->adev->dev);
        iadrv = ice_get_auxiliary_drv(pf);
        if (iadrv && iadrv->event_handler)
                iadrv->event_handler(pf, event);
        device_unlock(&pf->adev->dev);
+finish:
+       mutex_unlock(&pf->adev_mutex);
 }
 
 /**
@@ -290,7 +293,6 @@ int ice_plug_aux_dev(struct ice_pf *pf)
                return -ENOMEM;
 
        adev = &iadev->adev;
-       pf->adev = adev;
        iadev->pf = pf;
 
        adev->id = pf->aux_idx;
@@ -300,18 +302,20 @@ int ice_plug_aux_dev(struct ice_pf *pf)
 
        ret = auxiliary_device_init(adev);
        if (ret) {
-               pf->adev = NULL;
                kfree(iadev);
                return ret;
        }
 
        ret = auxiliary_device_add(adev);
        if (ret) {
-               pf->adev = NULL;
                auxiliary_device_uninit(adev);
                return ret;
        }
 
+       mutex_lock(&pf->adev_mutex);
+       pf->adev = adev;
+       mutex_unlock(&pf->adev_mutex);
+
        return 0;
 }
 
@@ -320,12 +324,17 @@ int ice_plug_aux_dev(struct ice_pf *pf)
  */
 void ice_unplug_aux_dev(struct ice_pf *pf)
 {
-       if (!pf->adev)
-               return;
+       struct auxiliary_device *adev;
 
-       auxiliary_device_delete(pf->adev);
-       auxiliary_device_uninit(pf->adev);
+       mutex_lock(&pf->adev_mutex);
+       adev = pf->adev;
        pf->adev = NULL;
+       mutex_unlock(&pf->adev_mutex);
+
+       if (adev) {
+               auxiliary_device_delete(adev);
+               auxiliary_device_uninit(adev);
+       }
 }
 
 /**
index 9a0a358..949669f 100644 (file)
@@ -3769,6 +3769,7 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf)
 static void ice_deinit_pf(struct ice_pf *pf)
 {
        ice_service_task_stop(pf);
+       mutex_destroy(&pf->adev_mutex);
        mutex_destroy(&pf->sw_mutex);
        mutex_destroy(&pf->tc_mutex);
        mutex_destroy(&pf->avail_q_mutex);
@@ -3847,6 +3848,7 @@ static int ice_init_pf(struct ice_pf *pf)
 
        mutex_init(&pf->sw_mutex);
        mutex_init(&pf->tc_mutex);
+       mutex_init(&pf->adev_mutex);
 
        INIT_HLIST_HEAD(&pf->aq_wait_list);
        spin_lock_init(&pf->aq_wait_lock);
index a1cd332..da025c2 100644 (file)
@@ -2287,6 +2287,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
 
 /**
  * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
+ * @hw: pointer to the hw struct
  * @tx: PTP Tx tracker to clean up
  *
  * Loop through the Tx timestamp requests and see if any of them have been
@@ -2295,7 +2296,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
  * timestamp will never be captured. This might happen if the packet gets
  * discarded before it reaches the PHY timestamping block.
  */
-static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
+static void ice_ptp_tx_tstamp_cleanup(struct ice_hw *hw, struct ice_ptp_tx *tx)
 {
        u8 idx;
 
@@ -2304,11 +2305,16 @@ static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
 
        for_each_set_bit(idx, tx->in_use, tx->len) {
                struct sk_buff *skb;
+               u64 raw_tstamp;
 
                /* Check if this SKB has been waiting for too long */
                if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
                        continue;
 
+               /* Read tstamp to be able to use this register again */
+               ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset,
+                                   &raw_tstamp);
+
                spin_lock(&tx->lock);
                skb = tx->tstamps[idx].skb;
                tx->tstamps[idx].skb = NULL;
@@ -2330,7 +2336,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
 
        ice_ptp_update_cached_phctime(pf);
 
-       ice_ptp_tx_tstamp_cleanup(&pf->ptp.port.tx);
+       ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);
 
        /* Run twice a second */
        kthread_queue_delayed_work(ptp->kworker, &ptp->work,
index b72606c..2889e05 100644 (file)
@@ -1308,12 +1308,51 @@ error_param:
 }
 
 /**
+ * ice_vf_vsi_dis_single_txq - disable a single Tx queue
+ * @vf: VF to disable queue for
+ * @vsi: VSI for the VF
+ * @q_id: VF relative (0-based) queue ID
+ *
+ * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
+ * disabled then clear q_id bit in the enabled queues bitmap and return
+ * success. Otherwise return error.
+ */
+static int
+ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
+{
+       struct ice_txq_meta txq_meta = { 0 };
+       struct ice_tx_ring *ring;
+       int err;
+
+       if (!test_bit(q_id, vf->txq_ena))
+               dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
+                       q_id, vsi->vsi_num);
+
+       ring = vsi->tx_rings[q_id];
+       if (!ring)
+               return -EINVAL;
+
+       ice_fill_txq_meta(vsi, ring, &txq_meta);
+
+       err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
+       if (err) {
+               dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
+                       q_id, vsi->vsi_num);
+               return err;
+       }
+
+       /* Clear enabled queues flag */
+       clear_bit(q_id, vf->txq_ena);
+
+       return 0;
+}
+
+/**
  * ice_vc_dis_qs_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
  *
- * called from the VF to disable all or specific
- * queue(s)
+ * called from the VF to disable all or specific queue(s)
  */
 static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
 {
@@ -1350,30 +1389,15 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
                q_map = vqs->tx_queues;
 
                for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
-                       struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id];
-                       struct ice_txq_meta txq_meta = { 0 };
-
                        if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
                                goto error_param;
                        }
 
-                       if (!test_bit(vf_q_id, vf->txq_ena))
-                               dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
-                                       vf_q_id, vsi->vsi_num);
-
-                       ice_fill_txq_meta(vsi, ring, &txq_meta);
-
-                       if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
-                                                ring, &txq_meta)) {
-                               dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
-                                       vf_q_id, vsi->vsi_num);
+                       if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
                                goto error_param;
                        }
-
-                       /* Clear enabled queues flag */
-                       clear_bit(vf_q_id, vf->txq_ena);
                }
        }
 
@@ -1622,6 +1646,14 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
                if (qpi->txq.ring_len > 0) {
                        vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
                        vsi->tx_rings[i]->count = qpi->txq.ring_len;
+
+                       /* Disable any existing queue first */
+                       if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) {
+                               v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+                               goto error_param;
+                       }
+
+                       /* Configure a queue with the requested settings */
                        if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
                                goto error_param;
index 3ad10c7..66298e2 100644 (file)
@@ -395,7 +395,7 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
        static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
        int i, k;
 
-       memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(ppe->foe_table));
+       memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
 
        if (!IS_ENABLED(CONFIG_SOC_MT7621))
                return;
index 01cf5a6..a2ee695 100644 (file)
@@ -568,10 +568,8 @@ static int
 mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_ipip_entry *ipip_entry)
 {
-       struct __ip6_tnl_parm parms6;
-
-       parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
-       return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, &parms6.raddr,
+       return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp,
+                                                &ipip_entry->parms.daddr.addr6,
                                                 &ipip_entry->dip_kvdl_index);
 }
 
@@ -579,10 +577,7 @@ static void
 mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
                                   const struct mlxsw_sp_ipip_entry *ipip_entry)
 {
-       struct __ip6_tnl_parm parms6;
-
-       parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
-       mlxsw_sp_ipv6_addr_put(mlxsw_sp, &parms6.raddr);
+       mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipip_entry->parms.daddr.addr6);
 }
 
 static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = {
index ca71b62..20ceac8 100644 (file)
@@ -1622,7 +1622,7 @@ int ocelot_trap_add(struct ocelot *ocelot, int port,
                trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
                trap->action.port_mask = 0;
                trap->take_ts = take_ts;
-               list_add_tail(&trap->trap_list, &ocelot->traps);
+               trap->is_trap = true;
                new = true;
        }
 
@@ -1634,10 +1634,8 @@ int ocelot_trap_add(struct ocelot *ocelot, int port,
                err = ocelot_vcap_filter_replace(ocelot, trap);
        if (err) {
                trap->ingress_port_mask &= ~BIT(port);
-               if (!trap->ingress_port_mask) {
-                       list_del(&trap->trap_list);
+               if (!trap->ingress_port_mask)
                        kfree(trap);
-               }
                return err;
        }
 
@@ -1657,11 +1655,8 @@ int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie)
                return 0;
 
        trap->ingress_port_mask &= ~BIT(port);
-       if (!trap->ingress_port_mask) {
-               list_del(&trap->trap_list);
-
+       if (!trap->ingress_port_mask)
                return ocelot_vcap_filter_del(ocelot, trap);
-       }
 
        return ocelot_vcap_filter_replace(ocelot, trap);
 }
index 03b5e59..51cf241 100644 (file)
@@ -280,9 +280,10 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
                        filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
                        break;
                case FLOW_ACTION_TRAP:
-                       if (filter->block_id != VCAP_IS2) {
+                       if (filter->block_id != VCAP_IS2 ||
+                           filter->lookup != 0) {
                                NL_SET_ERR_MSG_MOD(extack,
-                                                  "Trap action can only be offloaded to VCAP IS2");
+                                                  "Trap action can only be offloaded to VCAP IS2 lookup 0");
                                return -EOPNOTSUPP;
                        }
                        if (filter->goto_target != -1) {
@@ -295,7 +296,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
                        filter->action.cpu_copy_ena = true;
                        filter->action.cpu_qu_num = 0;
                        filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
-                       list_add_tail(&filter->trap_list, &ocelot->traps);
+                       filter->is_trap = true;
                        break;
                case FLOW_ACTION_POLICE:
                        if (filter->block_id == PSFP_BLOCK_ID) {
@@ -878,8 +879,6 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
 
        ret = ocelot_flower_parse(ocelot, port, ingress, f, filter);
        if (ret) {
-               if (!list_empty(&filter->trap_list))
-                       list_del(&filter->trap_list);
                kfree(filter);
                return ret;
        }
index c8701ac..eeb4cc0 100644 (file)
@@ -374,7 +374,6 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
                         OCELOT_VCAP_BIT_0);
        vcap_key_set(vcap, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0,
                     ~filter->ingress_port_mask);
-       vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_FIRST, OCELOT_VCAP_BIT_ANY);
        vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_HOST_MATCH,
                         OCELOT_VCAP_BIT_ANY);
        vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc);
@@ -1217,6 +1216,8 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
                struct ocelot_vcap_filter *tmp;
 
                tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+               /* Read back the filter's counters before moving it */
+               vcap_entry_get(ocelot, i - 1, tmp);
                vcap_entry_set(ocelot, i, tmp);
        }
 
@@ -1250,7 +1251,11 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
        struct ocelot_vcap_filter del_filter;
        int i, index;
 
+       /* Need to inherit the block_id so that vcap_entry_set()
+        * does not get confused and knows where to install it.
+        */
        memset(&del_filter, 0, sizeof(del_filter));
+       del_filter.block_id = filter->block_id;
 
        /* Gets index of the filter */
        index = ocelot_vcap_block_get_filter_index(block, filter);
@@ -1265,6 +1270,8 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
                struct ocelot_vcap_filter *tmp;
 
                tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+               /* Read back the filter's counters before moving it */
+               vcap_entry_get(ocelot, i + 1, tmp);
                vcap_entry_set(ocelot, i, tmp);
        }
 
index 6ffc62c..0a7a757 100644 (file)
@@ -256,7 +256,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        err = ionic_map_bars(ionic);
        if (err)
-               goto err_out_pci_disable_device;
+               goto err_out_pci_release_regions;
 
        /* Configure the device */
        err = ionic_setup(ionic);
@@ -360,6 +360,7 @@ err_out_teardown:
 
 err_out_unmap_bars:
        ionic_unmap_bars(ionic);
+err_out_pci_release_regions:
        pci_release_regions(pdev);
 err_out_pci_disable_device:
        pci_disable_device(pdev);
index 50d5359..f8edb3f 100644 (file)
@@ -3579,6 +3579,11 @@ static int efx_ef10_mtd_probe(struct efx_nic *efx)
                n_parts++;
        }
 
+       if (!n_parts) {
+               kfree(parts);
+               return 0;
+       }
+
        rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
 fail:
        if (rc)
index 377df8b..40df910 100644 (file)
@@ -867,7 +867,9 @@ static void efx_set_xdp_channels(struct efx_nic *efx)
 
 int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 {
-       struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+       struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel,
+                          *ptp_channel = efx_ptp_channel(efx);
+       struct efx_ptp_data *ptp_data = efx->ptp_data;
        unsigned int i, next_buffer_table = 0;
        u32 old_rxq_entries, old_txq_entries;
        int rc, rc2;
@@ -938,6 +940,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 
        efx_set_xdp_channels(efx);
 out:
+       efx->ptp_data = NULL;
        /* Destroy unused channel structures */
        for (i = 0; i < efx->n_channels; i++) {
                channel = other_channel[i];
@@ -948,6 +951,7 @@ out:
                }
        }
 
+       efx->ptp_data = ptp_data;
        rc2 = efx_soft_enable_interrupts(efx);
        if (rc2) {
                rc = rc ? rc : rc2;
@@ -966,6 +970,7 @@ rollback:
        efx->txq_entries = old_txq_entries;
        for (i = 0; i < efx->n_channels; i++)
                swap(efx->channel[i], other_channel[i]);
+       efx_ptp_update_channel(efx, ptp_channel);
        goto out;
 }
 
index f0ef515..4625f85 100644 (file)
@@ -45,6 +45,7 @@
 #include "farch_regs.h"
 #include "tx.h"
 #include "nic.h" /* indirectly includes ptp.h */
+#include "efx_channels.h"
 
 /* Maximum number of events expected to make up a PTP event */
 #define        MAX_EVENT_FRAGS                 3
@@ -541,6 +542,12 @@ struct efx_channel *efx_ptp_channel(struct efx_nic *efx)
        return efx->ptp_data ? efx->ptp_data->channel : NULL;
 }
 
+void efx_ptp_update_channel(struct efx_nic *efx, struct efx_channel *channel)
+{
+       if (efx->ptp_data)
+               efx->ptp_data->channel = channel;
+}
+
 static u32 last_sync_timestamp_major(struct efx_nic *efx)
 {
        struct efx_channel *channel = efx_ptp_channel(efx);
@@ -1443,6 +1450,11 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
        int rc = 0;
        unsigned int pos;
 
+       if (efx->ptp_data) {
+               efx->ptp_data->channel = channel;
+               return 0;
+       }
+
        ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
        efx->ptp_data = ptp;
        if (!efx->ptp_data)
@@ -2176,7 +2188,7 @@ static const struct efx_channel_type efx_ptp_channel_type = {
        .pre_probe              = efx_ptp_probe_channel,
        .post_remove            = efx_ptp_remove_channel,
        .get_name               = efx_ptp_get_channel_name,
-       /* no copy operation; there is no need to reallocate this channel */
+       .copy                   = efx_copy_channel,
        .receive_skb            = efx_ptp_rx,
        .want_txqs              = efx_ptp_want_txqs,
        .keep_eventq            = false,
index 9855e8c..7b1ef70 100644 (file)
@@ -16,6 +16,7 @@ struct ethtool_ts_info;
 int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
 void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
 struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
+void efx_ptp_update_channel(struct efx_nic *efx, struct efx_channel *channel);
 void efx_ptp_remove(struct efx_nic *efx);
 int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
 int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
index fcf17d8..644bb54 100644 (file)
@@ -181,7 +181,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
                return -ENOMEM;
 
        /* Enable pci device */
-       ret = pci_enable_device(pdev);
+       ret = pcim_enable_device(pdev);
        if (ret) {
                dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
                        __func__);
@@ -241,8 +241,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
                pcim_iounmap_regions(pdev, BIT(i));
                break;
        }
-
-       pci_disable_device(pdev);
 }
 
 static int __maybe_unused stmmac_pci_suspend(struct device *dev)
index fc53b71..cd9aa35 100644 (file)
@@ -1743,7 +1743,7 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
 
 static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr)
 {
-       u32 data;
+       int data;
 
        phy_lock_mdio_bus(phydev);
        __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
@@ -2444,8 +2444,7 @@ static int lan8804_config_init(struct phy_device *phydev)
 
 static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
 {
-       u16 tsu_irq_status;
-       int irq_status;
+       int irq_status, tsu_irq_status;
 
        irq_status = phy_read(phydev, LAN8814_INTS);
        if (irq_status > 0 && (irq_status & LAN8814_INT_LINK))
@@ -2657,6 +2656,7 @@ static struct phy_driver ksphy_driver[] = {
        .name           = "Micrel KS8737",
        /* PHY_BASIC_FEATURES */
        .driver_data    = &ks8737_type,
+       .probe          = kszphy_probe,
        .config_init    = kszphy_config_init,
        .config_intr    = kszphy_config_intr,
        .handle_interrupt = kszphy_handle_interrupt,
@@ -2782,8 +2782,8 @@ static struct phy_driver ksphy_driver[] = {
        .config_init    = ksz8061_config_init,
        .config_intr    = kszphy_config_intr,
        .handle_interrupt = kszphy_handle_interrupt,
-       .suspend        = kszphy_suspend,
-       .resume         = kszphy_resume,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
 }, {
        .phy_id         = PHY_ID_KSZ9021,
        .phy_id_mask    = 0x000ffffe,
index beb2b66..f122026 100644 (file)
@@ -970,8 +970,13 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
 {
        struct phy_device *phydev = phy_dat;
        struct phy_driver *drv = phydev->drv;
+       irqreturn_t ret;
 
-       return drv->handle_interrupt(phydev);
+       mutex_lock(&phydev->lock);
+       ret = drv->handle_interrupt(phydev);
+       mutex_unlock(&phydev->lock);
+
+       return ret;
 }
 
 /**
index 71eb7d0..90a5df1 100644 (file)
@@ -1288,6 +1288,7 @@ static void ath11k_core_restart(struct work_struct *work)
 
                ieee80211_stop_queues(ar->hw);
                ath11k_mac_drain_tx(ar);
+               complete(&ar->completed_11d_scan);
                complete(&ar->scan.started);
                complete(&ar->scan.completed);
                complete(&ar->peer_assoc_done);
index c0228e9..b8634ed 100644 (file)
@@ -38,6 +38,8 @@
 
 extern unsigned int ath11k_frame_mode;
 
+#define ATH11K_SCAN_TIMEOUT_HZ (20 * HZ)
+
 #define ATH11K_MON_TIMER_INTERVAL  10
 
 enum ath11k_supported_bw {
@@ -189,6 +191,12 @@ enum ath11k_scan_state {
        ATH11K_SCAN_ABORTING,
 };
 
+enum ath11k_11d_state {
+       ATH11K_11D_IDLE,
+       ATH11K_11D_PREPARING,
+       ATH11K_11D_RUNNING,
+};
+
 enum ath11k_dev_flags {
        ATH11K_CAC_RUNNING,
        ATH11K_FLAG_CORE_REGISTERED,
@@ -607,9 +615,8 @@ struct ath11k {
        bool dfs_block_radar_events;
        struct ath11k_thermal thermal;
        u32 vdev_id_11d_scan;
-       struct completion finish_11d_scan;
-       struct completion finish_11d_ch_list;
-       bool pending_11d;
+       struct completion completed_11d_scan;
+       enum ath11k_11d_state state_11d;
        bool regdom_set_by_user;
        int hw_rate_code;
        u8 twt_enabled;
index e6b34b0..58ff761 100644 (file)
@@ -3601,26 +3601,6 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
        if (ret)
                goto exit;
 
-       /* Currently the pending_11d=true only happened 1 time while
-        * wlan interface up in ath11k_mac_11d_scan_start(), it is called by
-        * ath11k_mac_op_add_interface(), after wlan interface up,
-        * pending_11d=false always.
-        * If remove below wait, it always happened scan fail and lead connect
-        * fail while wlan interface up, because it has a 11d scan which is running
-        * in firmware, and lead this scan failed.
-        */
-       if (ar->pending_11d) {
-               long time_left;
-               unsigned long timeout = 5 * HZ;
-
-               if (ar->supports_6ghz)
-                       timeout += 5 * HZ;
-
-               time_left = wait_for_completion_timeout(&ar->finish_11d_ch_list, timeout);
-               ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
-                          "mac wait 11d channel list time left %ld\n", time_left);
-       }
-
        memset(&arg, 0, sizeof(arg));
        ath11k_wmi_start_scan_init(ar, &arg);
        arg.vdev_id = arvif->vdev_id;
@@ -3686,6 +3666,10 @@ exit:
                kfree(arg.extraie.ptr);
 
        mutex_unlock(&ar->conf_mutex);
+
+       if (ar->state_11d == ATH11K_11D_PREPARING)
+               ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
+
        return ret;
 }
 
@@ -5814,7 +5798,7 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
 
        /* TODO: Do we need to enable ANI? */
 
-       ath11k_reg_update_chan_list(ar);
+       ath11k_reg_update_chan_list(ar, false);
 
        ar->num_started_vdevs = 0;
        ar->num_created_vdevs = 0;
@@ -5881,6 +5865,11 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
        cancel_work_sync(&ar->ab->update_11d_work);
        cancel_work_sync(&ar->ab->rfkill_work);
 
+       if (ar->state_11d == ATH11K_11D_PREPARING) {
+               ar->state_11d = ATH11K_11D_IDLE;
+               complete(&ar->completed_11d_scan);
+       }
+
        spin_lock_bh(&ar->data_lock);
        list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
                list_del(&ppdu_stats->list);
@@ -6051,7 +6040,7 @@ static bool ath11k_mac_vif_ap_active_any(struct ath11k_base *ab)
        return false;
 }
 
-void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait)
+void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id)
 {
        struct wmi_11d_scan_start_params param;
        int ret;
@@ -6079,28 +6068,22 @@ void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait)
 
        ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac start 11d scan\n");
 
-       if (wait)
-               reinit_completion(&ar->finish_11d_scan);
-
        ret = ath11k_wmi_send_11d_scan_start_cmd(ar, &param);
        if (ret) {
                ath11k_warn(ar->ab, "failed to start 11d scan vdev %d ret: %d\n",
                            vdev_id, ret);
        } else {
                ar->vdev_id_11d_scan = vdev_id;
-               if (wait) {
-                       ar->pending_11d = true;
-                       ret = wait_for_completion_timeout(&ar->finish_11d_scan,
-                                                         5 * HZ);
-                       ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
-                                  "mac 11d scan left time %d\n", ret);
-
-                       if (!ret)
-                               ar->pending_11d = false;
-               }
+               if (ar->state_11d == ATH11K_11D_PREPARING)
+                       ar->state_11d = ATH11K_11D_RUNNING;
        }
 
 fin:
+       if (ar->state_11d == ATH11K_11D_PREPARING) {
+               ar->state_11d = ATH11K_11D_IDLE;
+               complete(&ar->completed_11d_scan);
+       }
+
        mutex_unlock(&ar->ab->vdev_id_11d_lock);
 }
 
@@ -6123,12 +6106,15 @@ void ath11k_mac_11d_scan_stop(struct ath11k *ar)
                vdev_id = ar->vdev_id_11d_scan;
 
                ret = ath11k_wmi_send_11d_scan_stop_cmd(ar, vdev_id);
-               if (ret)
+               if (ret) {
                        ath11k_warn(ar->ab,
                                    "failed to stopt 11d scan vdev %d ret: %d\n",
                                    vdev_id, ret);
-               else
+               } else {
                        ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
+                       ar->state_11d = ATH11K_11D_IDLE;
+                       complete(&ar->completed_11d_scan);
+               }
        }
        mutex_unlock(&ar->ab->vdev_id_11d_lock);
 }
@@ -6324,8 +6310,10 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
                        goto err_peer_del;
                }
 
-               ath11k_mac_11d_scan_start(ar, arvif->vdev_id, true);
-
+               if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ab->wmi_ab.svc_map)) {
+                       reinit_completion(&ar->completed_11d_scan);
+                       ar->state_11d = ATH11K_11D_PREPARING;
+               }
                break;
        case WMI_VDEV_TYPE_MONITOR:
                set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
@@ -7190,7 +7178,7 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
        }
 
        if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
-               ath11k_mac_11d_scan_start(ar, arvif->vdev_id, false);
+               ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
 
        mutex_unlock(&ar->conf_mutex);
 }
@@ -8671,8 +8659,7 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
                ar->monitor_vdev_id = -1;
                clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
                ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
-               init_completion(&ar->finish_11d_scan);
-               init_completion(&ar->finish_11d_ch_list);
+               init_completion(&ar->completed_11d_scan);
        }
 
        return 0;
index 0e6c870..29b523a 100644 (file)
@@ -130,7 +130,7 @@ extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default;
 #define ATH11K_SCAN_11D_INTERVAL               600000
 #define ATH11K_11D_INVALID_VDEV_ID             0xFFFF
 
-void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait);
+void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id);
 void ath11k_mac_11d_scan_stop(struct ath11k *ar);
 void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab);
 
index 81e11cd..80a6977 100644 (file)
@@ -102,7 +102,7 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
        ar->regdom_set_by_user = true;
 }
 
-int ath11k_reg_update_chan_list(struct ath11k *ar)
+int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
 {
        struct ieee80211_supported_band **bands;
        struct scan_chan_list_params *params;
@@ -111,7 +111,32 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
        struct channel_param *ch;
        enum nl80211_band band;
        int num_channels = 0;
-       int i, ret;
+       int i, ret, left;
+
+       if (wait && ar->state_11d != ATH11K_11D_IDLE) {
+               left = wait_for_completion_timeout(&ar->completed_11d_scan,
+                                                  ATH11K_SCAN_TIMEOUT_HZ);
+               if (!left) {
+                       ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+                                  "failed to receive 11d scan complete: timed out\n");
+                       ar->state_11d = ATH11K_11D_IDLE;
+               }
+               ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+                          "reg 11d scan wait left time %d\n", left);
+       }
+
+       if (wait &&
+           (ar->scan.state == ATH11K_SCAN_STARTING ||
+           ar->scan.state == ATH11K_SCAN_RUNNING)) {
+               left = wait_for_completion_timeout(&ar->scan.completed,
+                                                  ATH11K_SCAN_TIMEOUT_HZ);
+               if (!left)
+                       ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+                                  "failed to receive hw scan complete: timed out\n");
+
+               ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+                          "reg hw scan wait left time %d\n", left);
+       }
 
        bands = hw->wiphy->bands;
        for (band = 0; band < NUM_NL80211_BANDS; band++) {
@@ -193,11 +218,6 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
        ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
        kfree(params);
 
-       if (ar->pending_11d) {
-               complete(&ar->finish_11d_ch_list);
-               ar->pending_11d = false;
-       }
-
        return ret;
 }
 
@@ -263,15 +283,8 @@ int ath11k_regd_update(struct ath11k *ar)
                goto err;
        }
 
-       if (ar->pending_11d)
-               complete(&ar->finish_11d_scan);
-
        rtnl_lock();
        wiphy_lock(ar->hw->wiphy);
-
-       if (ar->pending_11d)
-               reinit_completion(&ar->finish_11d_ch_list);
-
        ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy);
        wiphy_unlock(ar->hw->wiphy);
        rtnl_unlock();
@@ -282,7 +295,7 @@ int ath11k_regd_update(struct ath11k *ar)
                goto err;
 
        if (ar->state == ATH11K_STATE_ON) {
-               ret = ath11k_reg_update_chan_list(ar);
+               ret = ath11k_reg_update_chan_list(ar, true);
                if (ret)
                        goto err;
        }
index 5fb9dc0..2f284f2 100644 (file)
@@ -32,5 +32,5 @@ struct ieee80211_regdomain *
 ath11k_reg_build_regd(struct ath11k_base *ab,
                      struct cur_regulatory_info *reg_info, bool intersect);
 int ath11k_regd_update(struct ath11k *ar);
-int ath11k_reg_update_chan_list(struct ath11k *ar);
+int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait);
 #endif
index b4f86c4..2751fe8 100644 (file)
@@ -2015,7 +2015,10 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar,
 {
        /* setup commonly used values */
        arg->scan_req_id = 1;
-       arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+       if (ar->state_11d == ATH11K_11D_PREPARING)
+               arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
+       else
+               arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
        arg->dwell_time_active = 50;
        arg->dwell_time_active_2g = 0;
        arg->dwell_time_passive = 150;
@@ -6350,8 +6353,10 @@ static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
 static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb)
 {
        const struct wmi_11d_new_cc_ev *ev;
+       struct ath11k *ar;
+       struct ath11k_pdev *pdev;
        const void **tb;
-       int ret;
+       int ret, i;
 
        tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
        if (IS_ERR(tb)) {
@@ -6377,6 +6382,13 @@ static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *s
 
        kfree(tb);
 
+       for (i = 0; i < ab->num_radios; i++) {
+               pdev = &ab->pdevs[i];
+               ar = pdev->ar;
+               ar->state_11d = ATH11K_11D_IDLE;
+               complete(&ar->completed_11d_scan);
+       }
+
        queue_work(ab->workqueue, &ab->update_11d_work);
 
        return 0;
index 866a33f..3237d4b 100644 (file)
@@ -371,7 +371,7 @@ void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
        struct iwl_dbg_tlv_timer_node *node, *tmp;
 
        list_for_each_entry_safe(node, tmp, timer_list, list) {
-               del_timer(&node->timer);
+               del_timer_sync(&node->timer);
                list_del(&node->list);
                kfree(node);
        }
index 28bfa7b..e9ec63e 100644 (file)
@@ -2202,11 +2202,14 @@ mac80211_hwsim_sta_rc_update(struct ieee80211_hw *hw,
        if (!data->use_chanctx) {
                confbw = data->bw;
        } else {
-               struct ieee80211_chanctx_conf *chanctx_conf =
-                       rcu_dereference(vif->chanctx_conf);
+               struct ieee80211_chanctx_conf *chanctx_conf;
+
+               rcu_read_lock();
+               chanctx_conf = rcu_dereference(vif->chanctx_conf);
 
                if (!WARN_ON(!chanctx_conf))
                        confbw = chanctx_conf->def.width;
+               rcu_read_unlock();
        }
 
        WARN(bw > hwsim_get_chanwidth(confbw),
@@ -2475,11 +2478,13 @@ static void hw_scan_work(struct work_struct *work)
                        if (req->ie_len)
                                skb_put_data(probe, req->ie, req->ie_len);
 
+                       rcu_read_lock();
                        if (!ieee80211_tx_prepare_skb(hwsim->hw,
                                                      hwsim->hw_scan_vif,
                                                      probe,
                                                      hwsim->tmp_chan->band,
                                                      NULL)) {
+                               rcu_read_unlock();
                                kfree_skb(probe);
                                continue;
                        }
@@ -2487,6 +2492,7 @@ static void hw_scan_work(struct work_struct *work)
                        local_bh_disable();
                        mac80211_hwsim_tx_frame(hwsim->hw, probe,
                                                hwsim->tmp_chan);
+                       rcu_read_unlock();
                        local_bh_enable();
                }
        }
index d384d36..a62c5df 100644 (file)
@@ -817,7 +817,7 @@ err_cpkg:
 err_bus:
        return status;
 }
-module_init(ssam_core_init);
+subsys_initcall(ssam_core_init);
 
 static void __exit ssam_core_exit(void)
 {
index c1775db..ec66fde 100644 (file)
@@ -100,6 +100,14 @@ static const struct dmi_system_id dmi_lid_device_table[] = {
                .driver_data = (void *)lid_device_props_l4D,
        },
        {
+               .ident = "Surface Pro 8",
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 8"),
+               },
+               .driver_data = (void *)lid_device_props_l4B,
+       },
+       {
                .ident = "Surface Book 1",
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
index 6b6f3e2..f73ecfd 100644 (file)
@@ -103,7 +103,7 @@ static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxilia
        auxiliary_set_drvdata(auxdev, priv);
 
        for (i = 0; i < intel_vsec_dev->num_resources; i++) {
-               struct intel_pmt_entry *entry = &priv->entry[i];
+               struct intel_pmt_entry *entry = &priv->entry[priv->num_entries];
 
                ret = intel_pmt_dev_create(entry, &pmt_telem_ns, intel_vsec_dev, i);
                if (ret < 0)
index c568fae..e6cb4a1 100644 (file)
@@ -309,6 +309,20 @@ struct ibm_init_struct {
        struct ibm_struct *data;
 };
 
+/* DMI Quirks */
+struct quirk_entry {
+       bool btusb_bug;
+       u32 s2idle_bug_mmio;
+};
+
+static struct quirk_entry quirk_btusb_bug = {
+       .btusb_bug = true,
+};
+
+static struct quirk_entry quirk_s2idle_bug = {
+       .s2idle_bug_mmio = 0xfed80380,
+};
+
 static struct {
        u32 bluetooth:1;
        u32 hotkey:1;
@@ -338,6 +352,7 @@ static struct {
        u32 hotkey_poll_active:1;
        u32 has_adaptive_kbd:1;
        u32 kbd_lang:1;
+       struct quirk_entry *quirks;
 } tp_features;
 
 static struct {
@@ -4359,9 +4374,10 @@ static void bluetooth_exit(void)
        bluetooth_shutdown();
 }
 
-static const struct dmi_system_id bt_fwbug_list[] __initconst = {
+static const struct dmi_system_id fwbug_list[] __initconst = {
        {
                .ident = "ThinkPad E485",
+               .driver_data = &quirk_btusb_bug,
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_BOARD_NAME, "20KU"),
@@ -4369,6 +4385,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
        },
        {
                .ident = "ThinkPad E585",
+               .driver_data = &quirk_btusb_bug,
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_BOARD_NAME, "20KV"),
@@ -4376,6 +4393,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
        },
        {
                .ident = "ThinkPad A285 - 20MW",
+               .driver_data = &quirk_btusb_bug,
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_BOARD_NAME, "20MW"),
@@ -4383,6 +4401,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
        },
        {
                .ident = "ThinkPad A285 - 20MX",
+               .driver_data = &quirk_btusb_bug,
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_BOARD_NAME, "20MX"),
@@ -4390,6 +4409,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
        },
        {
                .ident = "ThinkPad A485 - 20MU",
+               .driver_data = &quirk_btusb_bug,
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_BOARD_NAME, "20MU"),
@@ -4397,14 +4417,125 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
        },
        {
                .ident = "ThinkPad A485 - 20MV",
+               .driver_data = &quirk_btusb_bug,
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_BOARD_NAME, "20MV"),
                },
        },
+       {
+               .ident = "L14 Gen2 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20X5"),
+               }
+       },
+       {
+               .ident = "T14s Gen2 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20XF"),
+               }
+       },
+       {
+               .ident = "X13 Gen2 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20XH"),
+               }
+       },
+       {
+               .ident = "T14 Gen2 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20XK"),
+               }
+       },
+       {
+               .ident = "T14 Gen1 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20UD"),
+               }
+       },
+       {
+               .ident = "T14 Gen1 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20UE"),
+               }
+       },
+       {
+               .ident = "T14s Gen1 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20UH"),
+               }
+       },
+       {
+               .ident = "P14s Gen1 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20Y1"),
+               }
+       },
+       {
+               .ident = "P14s Gen2 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "21A0"),
+               }
+       },
        {}
 };
 
+#ifdef CONFIG_SUSPEND
+/*
+ * Lenovo laptops from a variety of generations run a SMI handler during the D3->D0
+ * transition that occurs specifically when exiting suspend to idle which can cause
+ * large delays during resume when the IOMMU translation layer is enabled (the default
+ * behavior) for NVME devices:
+ *
+ * To avoid this firmware problem, skip the SMI handler on these machines before the
+ * D0 transition occurs.
+ */
+static void thinkpad_acpi_amd_s2idle_restore(void)
+{
+       struct resource *res;
+       void __iomem *addr;
+       u8 val;
+
+       res = request_mem_region_muxed(tp_features.quirks->s2idle_bug_mmio, 1,
+                                       "thinkpad_acpi_pm80");
+       if (!res)
+               return;
+
+       addr = ioremap(tp_features.quirks->s2idle_bug_mmio, 1);
+       if (!addr)
+               goto cleanup_resource;
+
+       val = ioread8(addr);
+       iowrite8(val & ~BIT(0), addr);
+
+       iounmap(addr);
+cleanup_resource:
+       release_resource(res);
+}
+
+static struct acpi_s2idle_dev_ops thinkpad_acpi_s2idle_dev_ops = {
+       .restore = thinkpad_acpi_amd_s2idle_restore,
+};
+#endif
+
 static const struct pci_device_id fwbug_cards_ids[] __initconst = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24FD) },
@@ -4419,7 +4550,8 @@ static int __init have_bt_fwbug(void)
         * Some AMD based ThinkPads have a firmware bug that calling
         * "GBDC" will cause bluetooth on Intel wireless cards blocked
         */
-       if (dmi_check_system(bt_fwbug_list) && pci_dev_present(fwbug_cards_ids)) {
+       if (tp_features.quirks && tp_features.quirks->btusb_bug &&
+           pci_dev_present(fwbug_cards_ids)) {
                vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
                        FW_BUG "disable bluetooth subdriver for Intel cards\n");
                return 1;
@@ -8748,24 +8880,27 @@ static int __init fan_init(struct ibm_init_struct *iibm)
                        fan_status_access_mode = TPACPI_FAN_RD_TPEC;
                        if (quirks & TPACPI_FAN_Q1)
                                fan_quirk1_setup();
-                       if (quirks & TPACPI_FAN_2FAN) {
-                               tp_features.second_fan = 1;
-                               pr_info("secondary fan support enabled\n");
-                       }
-                       if (quirks & TPACPI_FAN_2CTL) {
-                               tp_features.second_fan = 1;
-                               tp_features.second_fan_ctl = 1;
-                               pr_info("secondary fan control enabled\n");
-                       }
                        /* Try and probe the 2nd fan */
+                       tp_features.second_fan = 1; /* needed for get_speed to work */
                        res = fan2_get_speed(&speed);
                        if (res >= 0) {
                                /* It responded - so let's assume it's there */
                                tp_features.second_fan = 1;
                                tp_features.second_fan_ctl = 1;
                                pr_info("secondary fan control detected & enabled\n");
+                       } else {
+                               /* Fan not auto-detected */
+                               tp_features.second_fan = 0;
+                               if (quirks & TPACPI_FAN_2FAN) {
+                                       tp_features.second_fan = 1;
+                                       pr_info("secondary fan support enabled\n");
+                               }
+                               if (quirks & TPACPI_FAN_2CTL) {
+                                       tp_features.second_fan = 1;
+                                       tp_features.second_fan_ctl = 1;
+                                       pr_info("secondary fan control enabled\n");
+                               }
                        }
-
                } else {
                        pr_err("ThinkPad ACPI EC access misbehaving, fan status and control unavailable\n");
                        return -ENODEV;
@@ -11455,6 +11590,10 @@ static void thinkpad_acpi_module_exit(void)
 
        tpacpi_lifecycle = TPACPI_LIFE_EXITING;
 
+#ifdef CONFIG_SUSPEND
+       if (tp_features.quirks && tp_features.quirks->s2idle_bug_mmio)
+               acpi_unregister_lps0_dev(&thinkpad_acpi_s2idle_dev_ops);
+#endif
        if (tpacpi_hwmon)
                hwmon_device_unregister(tpacpi_hwmon);
        if (tp_features.sensors_pdrv_registered)
@@ -11496,6 +11635,7 @@ static void thinkpad_acpi_module_exit(void)
 
 static int __init thinkpad_acpi_module_init(void)
 {
+       const struct dmi_system_id *dmi_id;
        int ret, i;
 
        tpacpi_lifecycle = TPACPI_LIFE_INIT;
@@ -11535,6 +11675,10 @@ static int __init thinkpad_acpi_module_init(void)
                return -ENODEV;
        }
 
+       dmi_id = dmi_first_match(fwbug_list);
+       if (dmi_id)
+               tp_features.quirks = dmi_id->driver_data;
+
        /* Device initialization */
        tpacpi_pdev = platform_device_register_simple(TPACPI_DRVR_NAME, -1,
                                                        NULL, 0);
@@ -11623,6 +11767,13 @@ static int __init thinkpad_acpi_module_init(void)
                tp_features.input_device_registered = 1;
        }
 
+#ifdef CONFIG_SUSPEND
+       if (tp_features.quirks && tp_features.quirks->s2idle_bug_mmio) {
+               if (!acpi_register_lps0_dev(&thinkpad_acpi_s2idle_dev_ops))
+                       pr_info("Using s2idle quirk to avoid %s platform firmware bug\n",
+                               (dmi_id && dmi_id->ident) ? dmi_id->ident : "");
+       }
+#endif
        return 0;
 }
 
index 0feaa4b..dd45471 100644 (file)
@@ -1557,7 +1557,7 @@ ptp_ocp_signal_set(struct ptp_ocp *bp, int gen, struct ptp_ocp_signal *s)
        start_ns = ktime_set(ts.tv_sec, ts.tv_nsec) + NSEC_PER_MSEC;
        if (!s->start) {
                /* roundup() does not work on 32-bit systems */
-               s->start = DIV_ROUND_UP_ULL(start_ns, s->period);
+               s->start = DIV64_U64_ROUND_UP(start_ns, s->period);
                s->start = ktime_add(s->start, s->phase);
        }
 
index 88abfb5..8ac213a 100644 (file)
@@ -626,8 +626,6 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
                ctcm_clear_busy_do(dev);
        }
 
-       kfree(mpcginfo);
-
        return;
 
 }
@@ -1192,10 +1190,10 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
                                                CTCM_FUNTAIL, dev->name);
                        priv->stats.rx_dropped++;
                        /* mpcginfo only used for non-data transfers */
-                       kfree(mpcginfo);
                        if (do_debug_data)
                                ctcmpc_dump_skb(pskb, -8);
                }
+               kfree(mpcginfo);
        }
 done:
 
@@ -1977,7 +1975,6 @@ static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg)
                }
                break;
        }
-       kfree(mpcginfo);
 
        CTCM_PR_DEBUG("ctcmpc:%s() %s xid2:%i xid7:%i xidt_p2:%i \n",
                __func__, ch->id, grp->outstanding_xid2,
@@ -2038,7 +2035,6 @@ static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg)
                mpc_validate_xid(mpcginfo);
                break;
        }
-       kfree(mpcginfo);
        return;
 }
 
index ded1930..e3813a7 100644 (file)
@@ -39,11 +39,12 @@ static ssize_t ctcm_buffer_write(struct device *dev,
        struct ctcm_priv *priv = dev_get_drvdata(dev);
        int rc;
 
-       ndev = priv->channel[CTCM_READ]->netdev;
-       if (!(priv && priv->channel[CTCM_READ] && ndev)) {
+       if (!(priv && priv->channel[CTCM_READ] &&
+             priv->channel[CTCM_READ]->netdev)) {
                CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
                return -ENODEV;
        }
+       ndev = priv->channel[CTCM_READ]->netdev;
 
        rc = kstrtouint(buf, 0, &bs1);
        if (rc)
index bab9b34..84c8981 100644 (file)
@@ -1736,10 +1736,11 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
                        lcs_schedule_recovery(card);
                        break;
                case LCS_CMD_STOPLAN:
-                       pr_warn("Stoplan for %s initiated by LGW\n",
-                               card->dev->name);
-                       if (card->dev)
+                       if (card->dev) {
+                               pr_warn("Stoplan for %s initiated by LGW\n",
+                                       card->dev->name);
                                netif_carrier_off(card->dev);
+                       }
                        break;
                default:
                        LCS_DBF_TEXT(5, trace, "noLGWcmd");
index 37d06f9..1d9be77 100644 (file)
@@ -1172,9 +1172,8 @@ static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
        case SCSI_ACCESS_STATE_OPTIMAL:
        case SCSI_ACCESS_STATE_ACTIVE:
        case SCSI_ACCESS_STATE_LBA:
-               return BLK_STS_OK;
        case SCSI_ACCESS_STATE_TRANSITIONING:
-               return BLK_STS_AGAIN;
+               return BLK_STS_OK;
        default:
                req->rq_flags |= RQF_QUIET;
                return BLK_STS_IOERR;
index ef6e8cd..872a263 100644 (file)
@@ -1330,7 +1330,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
                    LPFC_SLI_INTF_IF_TYPE_0) {
                        /* FLOGI needs to be 3 for WQE FCFI */
-                       ct = ((SLI4_CT_FCFI >> 1) & 1) | (SLI4_CT_FCFI & 1);
+                       ct = SLI4_CT_FCFI;
                        bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
 
                        /* Set the fcfi to the fcfi we registered with */
index bda2a7b..6adaf79 100644 (file)
@@ -10720,10 +10720,10 @@ __lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
 
        /* Words 0 - 2 */
        bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
-       bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
-       bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
+       bde->addr_low = bpl->addr_low;
+       bde->addr_high = bpl->addr_high;
        bde->type_size = cpu_to_le32(xmit_len);
-       bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BLP_64);
+       bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
 
        /* Word 3 */
        cmdwqe->gen_req.request_payload_len = xmit_len;
index 85dbf81..6dfcfd8 100644 (file)
@@ -3826,6 +3826,9 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
 
        spin_lock_irqsave(&cmd->cmd_lock, flags);
        if (cmd->aborted) {
+               if (cmd->sg_mapped)
+                       qlt_unmap_sg(vha, cmd);
+
                spin_unlock_irqrestore(&cmd->cmd_lock, flags);
                /*
                 * It's normal to see 2 calls in this path:
index 97eb0de..a6bb0e4 100644 (file)
@@ -1434,10 +1434,7 @@ fb_release(struct inode *inode, struct file *file)
 __acquires(&info->lock)
 __releases(&info->lock)
 {
-       struct fb_info * const info = file_fb_info(file);
-
-       if (!info)
-               return -ENODEV;
+       struct fb_info * const info = file->private_data;
 
        lock_fb_info(info);
        if (info->fbops->fb_release)
index 2689294..82e31a2 100644 (file)
@@ -80,6 +80,10 @@ void framebuffer_release(struct fb_info *info)
 {
        if (!info)
                return;
+
+       if (WARN_ON(refcount_read(&info->count)))
+               return;
+
        kfree(info->apertures);
        kfree(info);
 }
index ea42ba6..b3d5f88 100644 (file)
@@ -243,6 +243,10 @@ error:
 static inline void efifb_show_boot_graphics(struct fb_info *info) {}
 #endif
 
+/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end
+ * of unregister_framebuffer() or fb_release(). Do any cleanup here.
+ */
 static void efifb_destroy(struct fb_info *info)
 {
        if (efifb_pci_dev)
@@ -254,10 +258,13 @@ static void efifb_destroy(struct fb_info *info)
                else
                        memunmap(info->screen_base);
        }
+
        if (request_mem_succeeded)
                release_mem_region(info->apertures->ranges[0].base,
                                   info->apertures->ranges[0].size);
        fb_dealloc_cmap(&info->cmap);
+
+       framebuffer_release(info);
 }
 
 static const struct fb_ops efifb_ops = {
@@ -620,9 +627,9 @@ static int efifb_remove(struct platform_device *pdev)
 {
        struct fb_info *info = platform_get_drvdata(pdev);
 
+       /* efifb_destroy takes care of info cleanup */
        unregister_framebuffer(info);
        sysfs_remove_groups(&pdev->dev.kobj, efifb_groups);
-       framebuffer_release(info);
 
        return 0;
 }
index 94fc9c6..2c19856 100644 (file)
@@ -84,6 +84,10 @@ struct simplefb_par {
 static void simplefb_clocks_destroy(struct simplefb_par *par);
 static void simplefb_regulators_destroy(struct simplefb_par *par);
 
+/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end
+ * of unregister_framebuffer() or fb_release(). Do any cleanup here.
+ */
 static void simplefb_destroy(struct fb_info *info)
 {
        struct simplefb_par *par = info->par;
@@ -94,6 +98,8 @@ static void simplefb_destroy(struct fb_info *info)
        if (info->screen_base)
                iounmap(info->screen_base);
 
+       framebuffer_release(info);
+
        if (mem)
                release_mem_region(mem->start, resource_size(mem));
 }
@@ -545,8 +551,8 @@ static int simplefb_remove(struct platform_device *pdev)
 {
        struct fb_info *info = platform_get_drvdata(pdev);
 
+       /* simplefb_destroy takes care of info cleanup */
        unregister_framebuffer(info);
-       framebuffer_release(info);
 
        return 0;
 }
index df6de5a..e25e8de 100644 (file)
@@ -179,6 +179,10 @@ static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
        return err;
 }
 
+/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end
+ * of unregister_framebuffer() or fb_release(). Do any cleanup here.
+ */
 static void vesafb_destroy(struct fb_info *info)
 {
        struct vesafb_par *par = info->par;
@@ -188,6 +192,8 @@ static void vesafb_destroy(struct fb_info *info)
        if (info->screen_base)
                iounmap(info->screen_base);
        release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
+
+       framebuffer_release(info);
 }
 
 static struct fb_ops vesafb_ops = {
@@ -484,10 +490,10 @@ static int vesafb_remove(struct platform_device *pdev)
 {
        struct fb_info *info = platform_get_drvdata(pdev);
 
+       /* vesafb_destroy takes care of info cleanup */
        unregister_framebuffer(info);
        if (((struct vesafb_par *)(info->par))->region)
                release_region(0x3c0, 32);
-       framebuffer_release(info);
 
        return 0;
 }
index aa25bff..b6edcf8 100644 (file)
@@ -85,7 +85,7 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
        if (folio_test_dirty(folio)) {
                dout("%p dirty_folio %p idx %lu -- already dirty\n",
                     mapping->host, folio, folio->index);
-               BUG_ON(!folio_get_private(folio));
+               VM_BUG_ON_FOLIO(!folio_test_private(folio), folio);
                return false;
        }
 
@@ -122,7 +122,7 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
         * Reference snap context in folio->private.  Also set
         * PagePrivate so that we get invalidate_folio callback.
         */
-       BUG_ON(folio_get_private(folio));
+       VM_BUG_ON_FOLIO(folio_test_private(folio), folio);
        folio_attach_private(folio, snapc);
 
        return ceph_fscache_dirty_folio(mapping, folio);
@@ -150,7 +150,7 @@ static void ceph_invalidate_folio(struct folio *folio, size_t offset,
        }
 
        WARN_ON(!folio_test_locked(folio));
-       if (folio_get_private(folio)) {
+       if (folio_test_private(folio)) {
                dout("%p invalidate_folio idx %lu full dirty page\n",
                     inode, folio->index);
 
@@ -729,8 +729,11 @@ static void writepages_finish(struct ceph_osd_request *req)
 
        /* clean all pages */
        for (i = 0; i < req->r_num_ops; i++) {
-               if (req->r_ops[i].op != CEPH_OSD_OP_WRITE)
+               if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) {
+                       pr_warn("%s incorrect op %d req %p index %d tid %llu\n",
+                               __func__, req->r_ops[i].op, req, i, req->r_tid);
                        break;
+               }
 
                osd_data = osd_req_op_extent_osd_data(req, i);
                BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
index 6c9e837..8c8226c 100644 (file)
@@ -629,9 +629,15 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
        iinfo.change_attr = 1;
        ceph_encode_timespec64(&iinfo.btime, &now);
 
-       iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
-       iinfo.xattr_data = xattr_buf;
-       memset(iinfo.xattr_data, 0, iinfo.xattr_len);
+       if (req->r_pagelist) {
+               iinfo.xattr_len = req->r_pagelist->length;
+               iinfo.xattr_data = req->r_pagelist->mapped_tail;
+       } else {
+               /* fake it */
+               iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
+               iinfo.xattr_data = xattr_buf;
+               memset(iinfo.xattr_data, 0, iinfo.xattr_len);
+       }
 
        in.ino = cpu_to_le64(vino.ino);
        in.snapid = cpu_to_le64(CEPH_NOSNAP);
@@ -743,6 +749,10 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
                err = ceph_security_init_secctx(dentry, mode, &as_ctx);
                if (err < 0)
                        goto out_ctx;
+               /* Async create can't handle more than a page of xattrs */
+               if (as_ctx.pagelist &&
+                   !list_is_singular(&as_ctx.pagelist->head))
+                       try_async = false;
        } else if (!d_in_lookup(dentry)) {
                /* If it's not being looked up, it's negative */
                return -ENOENT;
index 591fe9c..1fae019 100644 (file)
@@ -1712,6 +1712,10 @@ static int writeback_single_inode(struct inode *inode,
         */
        if (!(inode->i_state & I_DIRTY_ALL))
                inode_cgwb_move_to_attached(inode, wb);
+       else if (!(inode->i_state & I_SYNC_QUEUED) &&
+                (inode->i_state & I_DIRTY))
+               redirty_tail_locked(inode, wb);
+
        spin_unlock(&wb->list_lock);
        inode_sync_complete(inode);
 out:
index 39080b2..b669733 100644 (file)
@@ -1153,13 +1153,12 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
 
        if (length != written && (iomap->flags & IOMAP_F_NEW)) {
                /* Deallocate blocks that were just allocated. */
-               loff_t blockmask = i_blocksize(inode) - 1;
-               loff_t end = (pos + length) & ~blockmask;
+               loff_t hstart = round_up(pos + written, i_blocksize(inode));
+               loff_t hend = iomap->offset + iomap->length;
 
-               pos = (pos + written + blockmask) & ~blockmask;
-               if (pos < end) {
-                       truncate_pagecache_range(inode, pos, end - 1);
-                       punch_hole(ip, pos, end - pos);
+               if (hstart < hend) {
+                       truncate_pagecache_range(inode, hstart, hend - 1);
+                       punch_hole(ip, hstart, hend - hstart);
                }
        }
 
index 48f0132..2556ae1 100644 (file)
@@ -770,30 +770,27 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
        return ret ? ret : ret1;
 }
 
-static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i,
+static inline bool should_fault_in_pages(struct iov_iter *i,
+                                        struct kiocb *iocb,
                                         size_t *prev_count,
                                         size_t *window_size)
 {
        size_t count = iov_iter_count(i);
        size_t size, offs;
 
-       if (likely(!count))
-               return false;
-       if (ret <= 0 && ret != -EFAULT)
+       if (!count)
                return false;
        if (!iter_is_iovec(i))
                return false;
 
        size = PAGE_SIZE;
-       offs = offset_in_page(i->iov[0].iov_base + i->iov_offset);
+       offs = offset_in_page(iocb->ki_pos);
        if (*prev_count != count || !*window_size) {
                size_t nr_dirtied;
 
-               size = ALIGN(offs + count, PAGE_SIZE);
-               size = min_t(size_t, size, SZ_1M);
                nr_dirtied = max(current->nr_dirtied_pause -
                                 current->nr_dirtied, 8);
-               size = min(size, nr_dirtied << PAGE_SHIFT);
+               size = min_t(size_t, SZ_1M, nr_dirtied << PAGE_SHIFT);
        }
 
        *prev_count = count;
@@ -807,7 +804,7 @@ static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to,
        struct file *file = iocb->ki_filp;
        struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
        size_t prev_count = 0, window_size = 0;
-       size_t written = 0;
+       size_t read = 0;
        ssize_t ret;
 
        /*
@@ -835,35 +832,31 @@ retry:
        ret = gfs2_glock_nq(gh);
        if (ret)
                goto out_uninit;
-retry_under_glock:
        pagefault_disable();
        to->nofault = true;
        ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL,
-                          IOMAP_DIO_PARTIAL, written);
+                          IOMAP_DIO_PARTIAL, read);
        to->nofault = false;
        pagefault_enable();
+       if (ret <= 0 && ret != -EFAULT)
+               goto out_unlock;
        if (ret > 0)
-               written = ret;
-
-       if (should_fault_in_pages(ret, to, &prev_count, &window_size)) {
-               size_t leftover;
+               read = ret;
 
-               gfs2_holder_allow_demote(gh);
-               leftover = fault_in_iov_iter_writeable(to, window_size);
-               gfs2_holder_disallow_demote(gh);
-               if (leftover != window_size) {
-                       if (gfs2_holder_queued(gh))
-                               goto retry_under_glock;
+       if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
+               gfs2_glock_dq(gh);
+               window_size -= fault_in_iov_iter_writeable(to, window_size);
+               if (window_size)
                        goto retry;
-               }
        }
+out_unlock:
        if (gfs2_holder_queued(gh))
                gfs2_glock_dq(gh);
 out_uninit:
        gfs2_holder_uninit(gh);
        if (ret < 0)
                return ret;
-       return written;
+       return read;
 }
 
 static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
@@ -873,7 +866,7 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
        struct inode *inode = file->f_mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        size_t prev_count = 0, window_size = 0;
-       size_t read = 0;
+       size_t written = 0;
        ssize_t ret;
 
        /*
@@ -901,39 +894,35 @@ retry:
                goto out_uninit;
        /* Silently fall back to buffered I/O when writing beyond EOF */
        if (iocb->ki_pos + iov_iter_count(from) > i_size_read(&ip->i_inode))
-               goto out;
-retry_under_glock:
+               goto out_unlock;
 
        from->nofault = true;
        ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
-                          IOMAP_DIO_PARTIAL, read);
+                          IOMAP_DIO_PARTIAL, written);
        from->nofault = false;
-
-       if (ret == -ENOTBLK)
-               ret = 0;
+       if (ret <= 0) {
+               if (ret == -ENOTBLK)
+                       ret = 0;
+               if (ret != -EFAULT)
+                       goto out_unlock;
+       }
        if (ret > 0)
-               read = ret;
-
-       if (should_fault_in_pages(ret, from, &prev_count, &window_size)) {
-               size_t leftover;
+               written = ret;
 
-               gfs2_holder_allow_demote(gh);
-               leftover = fault_in_iov_iter_readable(from, window_size);
-               gfs2_holder_disallow_demote(gh);
-               if (leftover != window_size) {
-                       if (gfs2_holder_queued(gh))
-                               goto retry_under_glock;
+       if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
+               gfs2_glock_dq(gh);
+               window_size -= fault_in_iov_iter_readable(from, window_size);
+               if (window_size)
                        goto retry;
-               }
        }
-out:
+out_unlock:
        if (gfs2_holder_queued(gh))
                gfs2_glock_dq(gh);
 out_uninit:
        gfs2_holder_uninit(gh);
        if (ret < 0)
                return ret;
-       return read;
+       return written;
 }
 
 static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
@@ -941,7 +930,7 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
        struct gfs2_inode *ip;
        struct gfs2_holder gh;
        size_t prev_count = 0, window_size = 0;
-       size_t written = 0;
+       size_t read = 0;
        ssize_t ret;
 
        /*
@@ -962,7 +951,7 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
        if (ret >= 0) {
                if (!iov_iter_count(to))
                        return ret;
-               written = ret;
+               read = ret;
        } else if (ret != -EFAULT) {
                if (ret != -EAGAIN)
                        return ret;
@@ -975,30 +964,26 @@ retry:
        ret = gfs2_glock_nq(&gh);
        if (ret)
                goto out_uninit;
-retry_under_glock:
        pagefault_disable();
        ret = generic_file_read_iter(iocb, to);
        pagefault_enable();
+       if (ret <= 0 && ret != -EFAULT)
+               goto out_unlock;
        if (ret > 0)
-               written += ret;
-
-       if (should_fault_in_pages(ret, to, &prev_count, &window_size)) {
-               size_t leftover;
+               read += ret;
 
-               gfs2_holder_allow_demote(&gh);
-               leftover = fault_in_iov_iter_writeable(to, window_size);
-               gfs2_holder_disallow_demote(&gh);
-               if (leftover != window_size) {
-                       if (gfs2_holder_queued(&gh))
-                               goto retry_under_glock;
+       if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
+               gfs2_glock_dq(&gh);
+               window_size -= fault_in_iov_iter_writeable(to, window_size);
+               if (window_size)
                        goto retry;
-               }
        }
+out_unlock:
        if (gfs2_holder_queued(&gh))
                gfs2_glock_dq(&gh);
 out_uninit:
        gfs2_holder_uninit(&gh);
-       return written ? written : ret;
+       return read ? read : ret;
 }
 
 static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
@@ -1012,7 +997,7 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
        struct gfs2_holder *statfs_gh = NULL;
        size_t prev_count = 0, window_size = 0;
        size_t orig_count = iov_iter_count(from);
-       size_t read = 0;
+       size_t written = 0;
        ssize_t ret;
 
        /*
@@ -1030,10 +1015,18 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
 
        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, gh);
 retry:
+       if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
+               window_size -= fault_in_iov_iter_readable(from, window_size);
+               if (!window_size) {
+                       ret = -EFAULT;
+                       goto out_uninit;
+               }
+               from->count = min(from->count, window_size);
+       }
        ret = gfs2_glock_nq(gh);
        if (ret)
                goto out_uninit;
-retry_under_glock:
+
        if (inode == sdp->sd_rindex) {
                struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
 
@@ -1050,25 +1043,19 @@ retry_under_glock:
        current->backing_dev_info = NULL;
        if (ret > 0) {
                iocb->ki_pos += ret;
-               read += ret;
+               written += ret;
        }
 
        if (inode == sdp->sd_rindex)
                gfs2_glock_dq_uninit(statfs_gh);
 
-       from->count = orig_count - read;
-       if (should_fault_in_pages(ret, from, &prev_count, &window_size)) {
-               size_t leftover;
-
-               gfs2_holder_allow_demote(gh);
-               leftover = fault_in_iov_iter_readable(from, window_size);
-               gfs2_holder_disallow_demote(gh);
-               if (leftover != window_size) {
-                       from->count = min(from->count, window_size - leftover);
-                       if (gfs2_holder_queued(gh))
-                               goto retry_under_glock;
-                       goto retry;
-               }
+       if (ret <= 0 && ret != -EFAULT)
+               goto out_unlock;
+
+       from->count = orig_count - written;
+       if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
+               gfs2_glock_dq(gh);
+               goto retry;
        }
 out_unlock:
        if (gfs2_holder_queued(gh))
@@ -1077,8 +1064,8 @@ out_uninit:
        gfs2_holder_uninit(gh);
        if (statfs_gh)
                kfree(statfs_gh);
-       from->count = orig_count - read;
-       return read ? read : ret;
+       from->count = orig_count - written;
+       return written ? written : ret;
 }
 
 /**
index e2d59bb..9a16897 100644 (file)
@@ -517,7 +517,7 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
                if (result.negated)
                        ctx->flags &= ~NFS_MOUNT_SOFTREVAL;
                else
-                       ctx->flags &= NFS_MOUNT_SOFTREVAL;
+                       ctx->flags |= NFS_MOUNT_SOFTREVAL;
                break;
        case Opt_posix:
                if (result.negated)
index 9b32b76..a792e21 100644 (file)
@@ -1657,6 +1657,19 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
        else
                mnt = path.mnt;
 
+       /*
+        * FAN_RENAME is not allowed on non-dir (for now).
+        * We shouldn't have allowed setting any dirent events in mask of
+        * non-dir, but because we always allowed it, error only if group
+        * was initialized with the new flag FAN_REPORT_TARGET_FID.
+        */
+       ret = -ENOTDIR;
+       if (inode && !S_ISDIR(inode->i_mode) &&
+           ((mask & FAN_RENAME) ||
+            ((mask & FANOTIFY_DIRENT_EVENTS) &&
+             FAN_GROUP_FLAG(group, FAN_REPORT_TARGET_FID))))
+               goto path_put_and_out;
+
        /* Mask out FAN_EVENT_ON_CHILD flag for sb/mount/non-dir marks */
        if (mnt || !S_ISDIR(inode->i_mode)) {
                mask &= ~FAN_EVENT_ON_CHILD;
index 172c862..913bef0 100644 (file)
@@ -72,7 +72,7 @@ out:
        return 0;
 }
 
-static int seq_fdinfo_open(struct inode *inode, struct file *file)
+static int proc_fdinfo_access_allowed(struct inode *inode)
 {
        bool allowed = false;
        struct task_struct *task = get_proc_task(inode);
@@ -86,6 +86,16 @@ static int seq_fdinfo_open(struct inode *inode, struct file *file)
        if (!allowed)
                return -EACCES;
 
+       return 0;
+}
+
+static int seq_fdinfo_open(struct inode *inode, struct file *file)
+{
+       int ret = proc_fdinfo_access_allowed(inode);
+
+       if (ret)
+               return ret;
+
        return single_open(file, seq_show, inode);
 }
 
@@ -348,12 +358,23 @@ static int proc_readfdinfo(struct file *file, struct dir_context *ctx)
                                  proc_fdinfo_instantiate);
 }
 
+static int proc_open_fdinfo(struct inode *inode, struct file *file)
+{
+       int ret = proc_fdinfo_access_allowed(inode);
+
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
 const struct inode_operations proc_fdinfo_inode_operations = {
        .lookup         = proc_lookupfdinfo,
        .setattr        = proc_setattr,
 };
 
 const struct file_operations proc_fdinfo_operations = {
+       .open           = proc_open_fdinfo,
        .read           = generic_read_dir,
        .iterate_shared = proc_readfdinfo,
        .llseek         = generic_file_llseek,
index 0ed4861..b3d5f97 100644 (file)
@@ -75,11 +75,11 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
 
        if (fileident) {
                if (adinicb || (offset + lfi < 0)) {
-                       memcpy(udf_get_fi_ident(sfi), fileident, lfi);
+                       memcpy(sfi->impUse + liu, fileident, lfi);
                } else if (offset >= 0) {
                        memcpy(fibh->ebh->b_data + offset, fileident, lfi);
                } else {
-                       memcpy(udf_get_fi_ident(sfi), fileident, -offset);
+                       memcpy(sfi->impUse + liu, fileident, -offset);
                        memcpy(fibh->ebh->b_data, fileident - offset,
                                lfi + offset);
                }
@@ -88,11 +88,11 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
        offset += lfi;
 
        if (adinicb || (offset + padlen < 0)) {
-               memset(udf_get_fi_ident(sfi) + lfi, 0x00, padlen);
+               memset(sfi->impUse + liu + lfi, 0x00, padlen);
        } else if (offset >= 0) {
                memset(fibh->ebh->b_data + offset, 0x00, padlen);
        } else {
-               memset(udf_get_fi_ident(sfi) + lfi, 0x00, -offset);
+               memset(sfi->impUse + liu + lfi, 0x00, -offset);
                memset(fibh->ebh->b_data, 0x00, padlen + offset);
        }
 
index 2c6b9e4..7c2d77d 100644 (file)
@@ -169,7 +169,7 @@ enum {
 #define NETIF_F_HW_HSR_FWD     __NETIF_F(HW_HSR_FWD)
 #define NETIF_F_HW_HSR_DUP     __NETIF_F(HW_HSR_DUP)
 
-/* Finds the next feature with the highest number of the range of start till 0.
+/* Finds the next feature with the highest number of the range of start-1 till 0.
  */
 static inline int find_next_netdev_feature(u64 feature, unsigned long start)
 {
@@ -188,7 +188,7 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
        for ((bit) = find_next_netdev_feature((mask_addr),              \
                                              NETDEV_FEATURE_COUNT);    \
             (bit) >= 0;                                                \
-            (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
+            (bit) = find_next_netdev_feature((mask_addr), (bit)))
 
 /* Features valid for ethtool to change */
 /* = all defined minus driver/device-class-related */
index db51495..9050140 100644 (file)
@@ -160,7 +160,7 @@ struct rpc_add_xprt_test {
 #define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT     (1UL << 9)
 #define RPC_CLNT_CREATE_SOFTERR                (1UL << 10)
 #define RPC_CLNT_CREATE_REUSEPORT      (1UL << 11)
-#define RPC_CLNT_CREATE_IGNORE_NULL_UNAVAIL (1UL << 12)
+#define RPC_CLNT_CREATE_CONNECTED      (1UL << 12)
 
 struct rpc_clnt *rpc_create(struct rpc_create_args *args);
 struct rpc_clnt        *rpc_bind_new_program(struct rpc_clnt *,
index 8abd082..62d7b81 100644 (file)
@@ -36,6 +36,9 @@
 /* HCI priority */
 #define HCI_PRIO_MAX   7
 
+/* HCI maximum id value */
+#define HCI_MAX_ID 10000
+
 /* HCI Core structures */
 struct inquiry_data {
        bdaddr_t        bdaddr;
index 748cf87..3e02709 100644 (file)
@@ -14,6 +14,7 @@ struct tcf_pedit {
        struct tc_action        common;
        unsigned char           tcfp_nkeys;
        unsigned char           tcfp_flags;
+       u32                     tcfp_off_max_hint;
        struct tc_pedit_key     *tcfp_keys;
        struct tcf_pedit_key_ex *tcfp_keys_ex;
 };
index 7b2bf9b..de26c99 100644 (file)
@@ -681,7 +681,6 @@ struct ocelot_vcap_id {
 
 struct ocelot_vcap_filter {
        struct list_head list;
-       struct list_head trap_list;
 
        enum ocelot_vcap_filter_type type;
        int block_id;
@@ -695,6 +694,7 @@ struct ocelot_vcap_filter {
        struct ocelot_vcap_stats stats;
        /* For VCAP IS1 and IS2 */
        bool take_ts;
+       bool is_trap;
        unsigned long ingress_port_mask;
        /* For VCAP ES0 */
        struct ocelot_vcap_port ingress_port;
index 283c5a7..db6c858 100644 (file)
@@ -184,7 +184,7 @@ struct rfkill_event_ext {
 #define RFKILL_IOC_NOINPUT     1
 #define RFKILL_IOCTL_NOINPUT   _IO(RFKILL_IOC_MAGIC, RFKILL_IOC_NOINPUT)
 #define RFKILL_IOC_MAX_SIZE    2
-#define RFKILL_IOCTL_MAX_SIZE  _IOW(RFKILL_IOC_MAGIC, RFKILL_IOC_EXT_SIZE, __u32)
+#define RFKILL_IOCTL_MAX_SIZE  _IOW(RFKILL_IOC_MAGIC, RFKILL_IOC_MAX_SIZE, __u32)
 
 /* and that's all userspace gets */
 
index 80d76b7..7aa2eb7 100644 (file)
  * Virtio Transitional IDs
  */
 
-#define VIRTIO_TRANS_ID_NET            1000 /* transitional virtio net */
-#define VIRTIO_TRANS_ID_BLOCK          1001 /* transitional virtio block */
-#define VIRTIO_TRANS_ID_BALLOON                1002 /* transitional virtio balloon */
-#define VIRTIO_TRANS_ID_CONSOLE                1003 /* transitional virtio console */
-#define VIRTIO_TRANS_ID_SCSI           1004 /* transitional virtio SCSI */
-#define VIRTIO_TRANS_ID_RNG            1005 /* transitional virtio rng */
-#define VIRTIO_TRANS_ID_9P             1009 /* transitional virtio 9p console */
+#define VIRTIO_TRANS_ID_NET            0x1000 /* transitional virtio net */
+#define VIRTIO_TRANS_ID_BLOCK          0x1001 /* transitional virtio block */
+#define VIRTIO_TRANS_ID_BALLOON                0x1002 /* transitional virtio balloon */
+#define VIRTIO_TRANS_ID_CONSOLE                0x1003 /* transitional virtio console */
+#define VIRTIO_TRANS_ID_SCSI           0x1004 /* transitional virtio SCSI */
+#define VIRTIO_TRANS_ID_RNG            0x1005 /* transitional virtio rng */
+#define VIRTIO_TRANS_ID_9P             0x1009 /* transitional virtio 9p console */
 
 #endif /* _LINUX_VIRTIO_IDS_H */
index 9390bfd..71a4188 100644 (file)
@@ -3390,8 +3390,11 @@ static struct notifier_block cpuset_track_online_nodes_nb = {
  */
 void __init cpuset_init_smp(void)
 {
-       cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
-       top_cpuset.mems_allowed = node_states[N_MEMORY];
+       /*
+        * cpus_allowd/mems_allowed set to v2 values in the initial
+        * cpuset_bind() call will be reset to v1 values in another
+        * cpuset_bind() call when v1 cpuset is mounted.
+        */
        top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
 
        cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
index 06811d8..53f6b9c 100644 (file)
  *        Each profile size must be of NET_DIM_PARAMS_NUM_PROFILES
  */
 #define NET_DIM_PARAMS_NUM_PROFILES 5
-#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
-#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128
+#define NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE 256
+#define NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE 128
 #define NET_DIM_DEF_PROFILE_CQE 1
 #define NET_DIM_DEF_PROFILE_EQE 1
 
 #define NET_DIM_RX_EQE_PROFILES { \
-       {1,   NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-       {8,   NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-       {64,  NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-       {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-       {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+       {.usec = 1,   .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+       {.usec = 8,   .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+       {.usec = 64,  .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+       {.usec = 128, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+       {.usec = 256, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}  \
 }
 
 #define NET_DIM_RX_CQE_PROFILES { \
-       {2,  256},             \
-       {8,  128},             \
-       {16, 64},              \
-       {32, 64},              \
-       {64, 64}               \
+       {.usec = 2,  .pkts = 256,},             \
+       {.usec = 8,  .pkts = 128,},             \
+       {.usec = 16, .pkts = 64,},              \
+       {.usec = 32, .pkts = 64,},              \
+       {.usec = 64, .pkts = 64,}               \
 }
 
 #define NET_DIM_TX_EQE_PROFILES { \
-       {1,   NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE},  \
-       {8,   NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE},  \
-       {32,  NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE},  \
-       {64,  NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE},  \
-       {128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}   \
+       {.usec = 1,   .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,},  \
+       {.usec = 8,   .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,},  \
+       {.usec = 32,  .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,},  \
+       {.usec = 64,  .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,},  \
+       {.usec = 128, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}   \
 }
 
 #define NET_DIM_TX_CQE_PROFILES { \
-       {5,  128},  \
-       {8,  64},  \
-       {16, 32},  \
-       {32, 32},  \
-       {64, 32}   \
+       {.usec = 5,  .pkts = 128,},  \
+       {.usec = 8,  .pkts = 64,},  \
+       {.usec = 16, .pkts = 32,},  \
+       {.usec = 32, .pkts = 32,},  \
+       {.usec = 64, .pkts = 32,}   \
 }
 
 static const struct dim_cq_moder
index c468fee..910a138 100644 (file)
@@ -2495,11 +2495,16 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        struct address_space *mapping = NULL;
        int extra_pins, ret;
        pgoff_t end;
+       bool is_hzp;
 
-       VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
        VM_BUG_ON_PAGE(!PageLocked(head), head);
        VM_BUG_ON_PAGE(!PageCompound(head), head);
 
+       is_hzp = is_huge_zero_page(head);
+       VM_WARN_ON_ONCE_PAGE(is_hzp, head);
+       if (is_hzp)
+               return -EBUSY;
+
        if (PageWriteback(head))
                return -EBUSY;
 
index 9b2b5f5..11a9547 100644 (file)
@@ -621,6 +621,16 @@ static bool __init kfence_init_pool_early(void)
         * fails for the first page, and therefore expect addr==__kfence_pool in
         * most failure cases.
         */
+       for (char *p = (char *)addr; p < __kfence_pool + KFENCE_POOL_SIZE; p += PAGE_SIZE) {
+               struct slab *slab = virt_to_slab(p);
+
+               if (!slab)
+                       continue;
+#ifdef CONFIG_MEMCG
+               slab->memcg_data = 0;
+#endif
+               __folio_clear_slab(slab_folio(slab));
+       }
        memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
        __kfence_pool = NULL;
        return false;
index 27760c1..d4a4adc 100644 (file)
@@ -1274,7 +1274,7 @@ try_again:
        }
 out:
        if (ret == -EIO)
-               dump_page(p, "hwpoison: unhandlable page");
+               pr_err("Memory failure: %#lx: unhandlable page.\n", page_to_pfn(p));
 
        return ret;
 }
@@ -1861,19 +1861,6 @@ try_again:
 
        if (PageTransHuge(hpage)) {
                /*
-                * Bail out before SetPageHasHWPoisoned() if hpage is
-                * huge_zero_page, although PG_has_hwpoisoned is not
-                * checked in set_huge_zero_page().
-                *
-                * TODO: Handle memory failure of huge_zero_page thoroughly.
-                */
-               if (is_huge_zero_page(hpage)) {
-                       action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
-                       res = -EBUSY;
-                       goto unlock_mutex;
-               }
-
-               /*
                 * The flag must be set after the refcount is bumped
                 * otherwise it may race with THP split.
                 * And the flag can't be set in get_hwpoison_page() since
index 303d329..0b93fac 100644 (file)
@@ -947,7 +947,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
                return -EINTR;
        vma = vma_lookup(mm, addr);
        if (!vma) {
-               ret = EFAULT;
+               ret = -EFAULT;
                goto out;
        }
 
index 0899a72..c120c7c 100644 (file)
@@ -475,6 +475,17 @@ int batadv_frag_send_packet(struct sk_buff *skb,
                goto free_skb;
        }
 
+       /* GRO might have added fragments to the fragment list instead of
+        * frags[]. But this is not handled by skb_split and must be
+        * linearized to avoid incorrect length information after all
+        * batman-adv fragments were created and submitted to the
+        * hard-interface
+        */
+       if (skb_has_frag_list(skb) && __skb_linearize(skb)) {
+               ret = -ENOMEM;
+               goto free_skb;
+       }
+
        /* Create one header to be copied to all fragments */
        frag_header.packet_type = BATADV_UNICAST_FRAG;
        frag_header.version = BATADV_COMPAT_VERSION;
index b4782a6..45c2dd2 100644 (file)
@@ -2555,10 +2555,10 @@ int hci_register_dev(struct hci_dev *hdev)
         */
        switch (hdev->dev_type) {
        case HCI_PRIMARY:
-               id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
+               id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
                break;
        case HCI_AMP:
-               id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
+               id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
                break;
        default:
                return -EINVAL;
@@ -2567,7 +2567,7 @@ int hci_register_dev(struct hci_dev *hdev)
        if (id < 0)
                return id;
 
-       sprintf(hdev->name, "hci%d", id);
+       snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
        hdev->id = id;
 
        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
index 30b523f..c90c74d 100644 (file)
@@ -3897,7 +3897,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
        unsigned int delta_len = 0;
        struct sk_buff *tail = NULL;
        struct sk_buff *nskb, *tmp;
-       int err;
+       int len_diff, err;
 
        skb_push(skb, -skb_network_offset(skb) + offset);
 
@@ -3937,9 +3937,11 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
                skb_push(nskb, -skb_network_offset(nskb) + offset);
 
                skb_release_head_state(nskb);
+               len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb);
                __copy_skb_header(nskb, skb);
 
                skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
+               nskb->transport_header += len_diff;
                skb_copy_from_linear_data_offset(skb, -tnl_hlen,
                                                 nskb->data - tnl_hlen,
                                                 offset + tnl_hlen);
index 0ee7d4c..a09ba64 100644 (file)
@@ -854,7 +854,7 @@ static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa)
        memcpy(msg->neighbor, dn_hiord, ETH_ALEN);
 
        if (dn_db->router) {
-               struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
+               struct dn_neigh *dn = container_of(dn_db->router, struct dn_neigh, n);
                dn_dn2eth(msg->neighbor, dn->addr);
        }
 
@@ -902,7 +902,7 @@ static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
 {
        int n;
        struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
-       struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
+       struct dn_neigh *dn = container_of(dn_db->router, struct dn_neigh, n);
        struct sk_buff *skb;
        size_t size;
        unsigned char *ptr;
index 94b306f..fbd98ac 100644 (file)
@@ -426,7 +426,8 @@ int dn_neigh_router_hello(struct net *net, struct sock *sk, struct sk_buff *skb)
                        if (!dn_db->router) {
                                dn_db->router = neigh_clone(neigh);
                        } else {
-                               if (msg->priority > ((struct dn_neigh *)dn_db->router)->priority)
+                               if (msg->priority > container_of(dn_db->router,
+                                                                struct dn_neigh, n)->priority)
                                        neigh_release(xchg(&dn_db->router, neigh_clone(neigh)));
                        }
                }
index 7e85f2a..d1d78a4 100644 (file)
@@ -1120,7 +1120,7 @@ source_ok:
                /* Ok then, we assume its directly connected and move on */
 select_source:
                if (neigh)
-                       gateway = ((struct dn_neigh *)neigh)->addr;
+                       gateway = container_of(neigh, struct dn_neigh, n)->addr;
                if (gateway == 0)
                        gateway = fld.daddr;
                if (fld.saddr == 0) {
@@ -1429,7 +1429,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
                /* Use the default router if there is one */
                neigh = neigh_clone(dn_db->router);
                if (neigh) {
-                       gateway = ((struct dn_neigh *)neigh)->addr;
+                       gateway = container_of(neigh, struct dn_neigh, n)->addr;
                        goto make_route;
                }
 
index cdc56ba..bdccb61 100644 (file)
@@ -451,6 +451,7 @@ out_rollback_unoffload:
        switchdev_bridge_port_unoffload(brport_dev, dp,
                                        &dsa_slave_switchdev_notifier,
                                        &dsa_slave_switchdev_blocking_notifier);
+       dsa_flush_workqueue();
 out_rollback_unbridge:
        dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
 out_rollback:
index 3ee9475..aa9a11b 100644 (file)
@@ -305,6 +305,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
        struct net *net = sock_net(sk);
        if (sk->sk_family == AF_INET) {
                struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
+               u32 tb_id = RT_TABLE_LOCAL;
                int chk_addr_ret;
 
                if (addr_len < sizeof(*addr))
@@ -318,7 +319,8 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
                pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
                         sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
 
-               chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);
+               tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
+               chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
 
                if (!inet_addr_valid_or_nonlocal(net, inet_sk(sk),
                                                 addr->sin_addr.s_addr,
@@ -355,6 +357,14 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
                                return -ENODEV;
                        }
                }
+
+               if (!dev && sk->sk_bound_dev_if) {
+                       dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
+                       if (!dev) {
+                               rcu_read_unlock();
+                               return -ENODEV;
+                       }
+               }
                has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
                                                    scoped);
                rcu_read_unlock();
index 98c6f34..57abd27 100644 (file)
@@ -1753,6 +1753,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 #endif
        RT_CACHE_STAT_INC(in_slow_mc);
 
+       skb_dst_drop(skb);
        skb_dst_set(skb, &rth->dst);
        return 0;
 }
index 1b30c72..dc8aec1 100644 (file)
@@ -3657,6 +3657,12 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
                                cbss->transmitted_bss->bssid);
                bss_conf->bssid_indicator = cbss->max_bssid_indicator;
                bss_conf->bssid_index = cbss->bssid_index;
+       } else {
+               bss_conf->nontransmitted = false;
+               memset(bss_conf->transmitter_bssid, 0,
+                      sizeof(bss_conf->transmitter_bssid));
+               bss_conf->bssid_indicator = 0;
+               bss_conf->bssid_index = 0;
        }
 
        /*
index beb6b92..88d797f 100644 (file)
@@ -1405,8 +1405,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
                goto dont_reorder;
 
        /* not part of a BA session */
-       if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
-           ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
+       if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
                goto dont_reorder;
 
        /* new, potentially un-ordered, ampdu frame - process it */
index 05a3795..73e9c0a 100644 (file)
@@ -1975,7 +1975,6 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                copied = len;
        }
 
-       skb_reset_transport_header(data_skb);
        err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
 
        if (msg->msg_name) {
index 2f638f8..73ee277 100644 (file)
@@ -487,11 +487,11 @@ struct rds_tcp_net {
 /* All module specific customizations to the RDS-TCP socket should be done in
  * rds_tcp_tune() and applied after socket creation.
  */
-void rds_tcp_tune(struct socket *sock)
+bool rds_tcp_tune(struct socket *sock)
 {
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
-       struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+       struct rds_tcp_net *rtn;
 
        tcp_sock_set_nodelay(sock->sk);
        lock_sock(sk);
@@ -499,10 +499,15 @@ void rds_tcp_tune(struct socket *sock)
         * a process which created this net namespace terminated.
         */
        if (!sk->sk_net_refcnt) {
+               if (!maybe_get_net(net)) {
+                       release_sock(sk);
+                       return false;
+               }
                sk->sk_net_refcnt = 1;
-               get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
+               netns_tracker_alloc(net, &sk->ns_tracker, GFP_KERNEL);
                sock_inuse_add(net, 1);
        }
+       rtn = net_generic(net, rds_tcp_netid);
        if (rtn->sndbuf_size > 0) {
                sk->sk_sndbuf = rtn->sndbuf_size;
                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
@@ -512,6 +517,7 @@ void rds_tcp_tune(struct socket *sock)
                sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
        }
        release_sock(sk);
+       return true;
 }
 
 static void rds_tcp_accept_worker(struct work_struct *work)
index dc8d745..f8b5930 100644 (file)
@@ -49,7 +49,7 @@ struct rds_tcp_statistics {
 };
 
 /* tcp.c */
-void rds_tcp_tune(struct socket *sock);
+bool rds_tcp_tune(struct socket *sock);
 void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp);
 void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp);
 void rds_tcp_restore_callbacks(struct socket *sock,
index 5461d77..f0c477c 100644 (file)
@@ -124,7 +124,10 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp)
        if (ret < 0)
                goto out;
 
-       rds_tcp_tune(sock);
+       if (!rds_tcp_tune(sock)) {
+               ret = -EINVAL;
+               goto out;
+       }
 
        if (isv6) {
                sin6.sin6_family = AF_INET6;
index 09cadd5..7edf2e6 100644 (file)
@@ -133,7 +133,10 @@ int rds_tcp_accept_one(struct socket *sock)
        __module_get(new_sock->ops->owner);
 
        rds_tcp_keepalive(new_sock);
-       rds_tcp_tune(new_sock);
+       if (!rds_tcp_tune(new_sock)) {
+               ret = -EINVAL;
+               goto out;
+       }
 
        inet = inet_sk(new_sock->sk);
 
index 31fcd27..0eaaf1f 100644 (file)
@@ -149,7 +149,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
        struct nlattr *pattr;
        struct tcf_pedit *p;
        int ret = 0, err;
-       int ksize;
+       int i, ksize;
        u32 index;
 
        if (!nla) {
@@ -228,6 +228,18 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                p->tcfp_nkeys = parm->nkeys;
        }
        memcpy(p->tcfp_keys, parm->keys, ksize);
+       p->tcfp_off_max_hint = 0;
+       for (i = 0; i < p->tcfp_nkeys; ++i) {
+               u32 cur = p->tcfp_keys[i].off;
+
+               /* The AT option can read a single byte, we can bound the actual
+                * value with uchar max.
+                */
+               cur += (0xff & p->tcfp_keys[i].offmask) >> p->tcfp_keys[i].shift;
+
+               /* Each key touches 4 bytes starting from the computed offset */
+               p->tcfp_off_max_hint = max(p->tcfp_off_max_hint, cur + 4);
+       }
 
        p->tcfp_flags = parm->flags;
        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
@@ -308,13 +320,18 @@ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a,
                         struct tcf_result *res)
 {
        struct tcf_pedit *p = to_pedit(a);
+       u32 max_offset;
        int i;
 
-       if (skb_unclone(skb, GFP_ATOMIC))
-               return p->tcf_action;
-
        spin_lock(&p->tcf_lock);
 
+       max_offset = (skb_transport_header_was_set(skb) ?
+                     skb_transport_offset(skb) :
+                     skb_network_offset(skb)) +
+                    p->tcfp_off_max_hint;
+       if (skb_ensure_writable(skb, min(skb->len, max_offset)))
+               goto unlock;
+
        tcf_lastuse_update(&p->tcf_tm);
 
        if (p->tcfp_nkeys > 0) {
@@ -403,6 +420,7 @@ bad:
        p->tcf_qstats.overlimits++;
 done:
        bstats_update(&p->tcf_bstats, skb);
+unlock:
        spin_unlock(&p->tcf_lock);
        return p->tcf_action;
 }
index 51e8eb2..338b9ef 100644 (file)
@@ -355,12 +355,12 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
                                }
                                break;
                        }
+                       if (!timeo)
+                               return -EAGAIN;
                        if (signal_pending(current)) {
                                read_done = sock_intr_errno(timeo);
                                break;
                        }
-                       if (!timeo)
-                               return -EAGAIN;
                }
 
                if (!smc_rx_data_available(conn)) {
index 8ca1d80..f549e4c 100644 (file)
@@ -97,7 +97,8 @@ static int gssp_rpc_create(struct net *net, struct rpc_clnt **_clnt)
                 * timeout, which would result in reconnections being
                 * done without the correct namespace:
                 */
-               .flags          = RPC_CLNT_CREATE_IGNORE_NULL_UNAVAIL |
+               .flags          = RPC_CLNT_CREATE_NOPING |
+                                 RPC_CLNT_CREATE_CONNECTED |
                                  RPC_CLNT_CREATE_NO_IDLE_TIMEOUT
        };
        struct rpc_clnt *clnt;
index 22c28cf..e2c6eca 100644 (file)
@@ -76,6 +76,7 @@ static int    rpc_encode_header(struct rpc_task *task,
 static int     rpc_decode_header(struct rpc_task *task,
                                  struct xdr_stream *xdr);
 static int     rpc_ping(struct rpc_clnt *clnt);
+static int     rpc_ping_noreply(struct rpc_clnt *clnt);
 static void    rpc_check_timeout(struct rpc_task *task);
 
 static void rpc_register_client(struct rpc_clnt *clnt)
@@ -479,9 +480,12 @@ static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
 
        if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
                int err = rpc_ping(clnt);
-               if ((args->flags & RPC_CLNT_CREATE_IGNORE_NULL_UNAVAIL) &&
-                   err == -EOPNOTSUPP)
-                       err = 0;
+               if (err != 0) {
+                       rpc_shutdown_client(clnt);
+                       return ERR_PTR(err);
+               }
+       } else if (args->flags & RPC_CLNT_CREATE_CONNECTED) {
+               int err = rpc_ping_noreply(clnt);
                if (err != 0) {
                        rpc_shutdown_client(clnt);
                        return ERR_PTR(err);
@@ -2712,6 +2716,10 @@ static const struct rpc_procinfo rpcproc_null = {
        .p_decode = rpcproc_decode_null,
 };
 
+static const struct rpc_procinfo rpcproc_null_noreply = {
+       .p_encode = rpcproc_encode_null,
+};
+
 static void
 rpc_null_call_prepare(struct rpc_task *task, void *data)
 {
@@ -2765,6 +2773,28 @@ static int rpc_ping(struct rpc_clnt *clnt)
        return status;
 }
 
+static int rpc_ping_noreply(struct rpc_clnt *clnt)
+{
+       struct rpc_message msg = {
+               .rpc_proc = &rpcproc_null_noreply,
+       };
+       struct rpc_task_setup task_setup_data = {
+               .rpc_client = clnt,
+               .rpc_message = &msg,
+               .callback_ops = &rpc_null_ops,
+               .flags = RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS,
+       };
+       struct rpc_task *task;
+       int status;
+
+       task = rpc_run_task(&task_setup_data);
+       if (IS_ERR(task))
+               return PTR_ERR(task);
+       status = task->tk_status;
+       rpc_put_task(task);
+       return status;
+}
+
 struct rpc_cb_add_xprt_calldata {
        struct rpc_xprt_switch *xps;
        struct rpc_xprt *xprt;
index af875ad..3919fe2 100644 (file)
@@ -1347,7 +1347,10 @@ static int tls_device_down(struct net_device *netdev)
 
                /* Device contexts for RX and TX will be freed in on sk_destruct
                 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
+                * Now release the ref taken above.
                 */
+               if (refcount_dec_and_test(&ctx->refcount))
+                       tls_device_free_ctx(ctx);
        }
 
        up_write(&device_offload_lock);
index 21e808f..1a3551b 100644 (file)
@@ -3173,6 +3173,15 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
        } else if (attrs[NL80211_ATTR_CHANNEL_WIDTH]) {
                chandef->width =
                        nla_get_u32(attrs[NL80211_ATTR_CHANNEL_WIDTH]);
+               if (chandef->chan->band == NL80211_BAND_S1GHZ) {
+                       /* User input error for channel width doesn't match channel  */
+                       if (chandef->width != ieee80211_s1g_channel_width(chandef->chan)) {
+                               NL_SET_ERR_MSG_ATTR(extack,
+                                                   attrs[NL80211_ATTR_CHANNEL_WIDTH],
+                                                   "bad channel width");
+                               return -EINVAL;
+                       }
+               }
                if (attrs[NL80211_ATTR_CENTER_FREQ1]) {
                        chandef->center_freq1 =
                                nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ1]);
@@ -11657,18 +11666,23 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
        struct cfg80211_bitrate_mask mask;
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
        struct net_device *dev = info->user_ptr[1];
+       struct wireless_dev *wdev = dev->ieee80211_ptr;
        int err;
 
        if (!rdev->ops->set_bitrate_mask)
                return -EOPNOTSUPP;
 
+       wdev_lock(wdev);
        err = nl80211_parse_tx_bitrate_mask(info, info->attrs,
                                            NL80211_ATTR_TX_RATES, &mask,
                                            dev, true);
        if (err)
-               return err;
+               goto out;
 
-       return rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
+       err = rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
+out:
+       wdev_unlock(wdev);
+       return err;
 }
 
 static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
index 4a6d864..6d82bd9 100644 (file)
@@ -1829,7 +1829,7 @@ int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
                if (tmp && tmp->datalen >= sizeof(struct ieee80211_s1g_oper_ie)) {
                        struct ieee80211_s1g_oper_ie *s1gop = (void *)tmp->data;
 
-                       return s1gop->primary_ch;
+                       return s1gop->oper_ch;
                }
        } else {
                tmp = cfg80211_find_elem(WLAN_EID_DS_PARAMS, ie, ielen);
index 0f2ebc3..e1f998d 100644 (file)
@@ -25,6 +25,7 @@ TEST_PROGS += bareudp.sh
 TEST_PROGS += amt.sh
 TEST_PROGS += unicast_extensions.sh
 TEST_PROGS += udpgro_fwd.sh
+TEST_PROGS += udpgro_frglist.sh
 TEST_PROGS += veth.sh
 TEST_PROGS += ioam6.sh
 TEST_PROGS += gro.sh
@@ -61,6 +62,8 @@ TEST_FILES := settings
 KSFT_KHDR_INSTALL := 1
 include ../lib.mk
 
+include bpf/Makefile
+
 $(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
 $(OUTPUT)/tcp_mmap: LDLIBS += -lpthread
 $(OUTPUT)/tcp_inq: LDLIBS += -lpthread
diff --git a/tools/testing/selftests/net/bpf/Makefile b/tools/testing/selftests/net/bpf/Makefile
new file mode 100644 (file)
index 0000000..f91bf14
--- /dev/null
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+CLANG ?= clang
+CCINCLUDE += -I../../bpf
+CCINCLUDE += -I../../../../../usr/include/
+
+TEST_CUSTOM_PROGS = $(OUTPUT)/bpf/nat6to4.o
+all: $(TEST_CUSTOM_PROGS)
+
+$(OUTPUT)/%.o: %.c
+       $(CLANG) -O2 -target bpf -c $< $(CCINCLUDE) -o $@
+
+clean:
+       rm -f $(TEST_CUSTOM_PROGS)
diff --git a/tools/testing/selftests/net/bpf/nat6to4.c b/tools/testing/selftests/net/bpf/nat6to4.c
new file mode 100644 (file)
index 0000000..ac54c36
--- /dev/null
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This code is taken from the Android Open Source Project and the author
+ * (Maciej Żenczykowski) has gave permission to relicense it under the
+ * GPLv2. Therefore this program is free software;
+ * You can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation
+
+ * The original headers, including the original license headers, are
+ * included below for completeness.
+ *
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <linux/bpf.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/pkt_cls.h>
+#include <linux/swab.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+
+#include <linux/udp.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define IP_DF 0x4000  // Flag: "Don't Fragment"
+
+SEC("schedcls/ingress6/nat_6")
+int sched_cls_ingress6_nat_6_prog(struct __sk_buff *skb)
+{
+       const int l2_header_size =  sizeof(struct ethhdr);
+       void *data = (void *)(long)skb->data;
+       const void *data_end = (void *)(long)skb->data_end;
+       const struct ethhdr * const eth = data;  // used iff is_ethernet
+       const struct ipv6hdr * const ip6 =  (void *)(eth + 1);
+
+       // Require ethernet dst mac address to be our unicast address.
+       if  (skb->pkt_type != PACKET_HOST)
+               return TC_ACT_OK;
+
+       // Must be meta-ethernet IPv6 frame
+       if (skb->protocol != bpf_htons(ETH_P_IPV6))
+               return TC_ACT_OK;
+
+       // Must have (ethernet and) ipv6 header
+       if (data + l2_header_size + sizeof(*ip6) > data_end)
+               return TC_ACT_OK;
+
+       // Ethertype - if present - must be IPv6
+       if (eth->h_proto != bpf_htons(ETH_P_IPV6))
+               return TC_ACT_OK;
+
+       // IP version must be 6
+       if (ip6->version != 6)
+               return TC_ACT_OK;
+       // Maximum IPv6 payload length that can be translated to IPv4
+       if (bpf_ntohs(ip6->payload_len) > 0xFFFF - sizeof(struct iphdr))
+               return TC_ACT_OK;
+       switch (ip6->nexthdr) {
+       case IPPROTO_TCP:  // For TCP & UDP the checksum neutrality of the chosen IPv6
+       case IPPROTO_UDP:  // address means there is no need to update their checksums.
+       case IPPROTO_GRE:  // We do not need to bother looking at GRE/ESP headers,
+       case IPPROTO_ESP:  // since there is never a checksum to update.
+               break;
+       default:  // do not know how to handle anything else
+               return TC_ACT_OK;
+       }
+
+       struct ethhdr eth2;  // used iff is_ethernet
+
+       eth2 = *eth;                     // Copy over the ethernet header (src/dst mac)
+       eth2.h_proto = bpf_htons(ETH_P_IP);  // But replace the ethertype
+
+       struct iphdr ip = {
+               .version = 4,                                                      // u4
+               .ihl = sizeof(struct iphdr) / sizeof(__u32),                       // u4
+               .tos = (ip6->priority << 4) + (ip6->flow_lbl[0] >> 4),             // u8
+               .tot_len = bpf_htons(bpf_ntohs(ip6->payload_len) + sizeof(struct iphdr)),  // u16
+               .id = 0,                                                           // u16
+               .frag_off = bpf_htons(IP_DF),                                          // u16
+               .ttl = ip6->hop_limit,                                             // u8
+               .protocol = ip6->nexthdr,                                          // u8
+               .check = 0,                                                        // u16
+               .saddr = 0x0201a8c0,                            // u32
+               .daddr = 0x0101a8c0,                                         // u32
+       };
+
+       // Calculate the IPv4 one's complement checksum of the IPv4 header.
+       __wsum sum4 = 0;
+
+       for (int i = 0; i < sizeof(ip) / sizeof(__u16); ++i)
+               sum4 += ((__u16 *)&ip)[i];
+
+       // Note that sum4 is guaranteed to be non-zero by virtue of ip.version == 4
+       sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse u32 into range 1 .. 0x1FFFE
+       sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse any potential carry into u16
+       ip.check = (__u16)~sum4;                // sum4 cannot be zero, so this is never 0xFFFF
+
+       // Calculate the *negative* IPv6 16-bit one's complement checksum of the IPv6 header.
+       __wsum sum6 = 0;
+       // We'll end up with a non-zero sum due to ip6->version == 6 (which has '0' bits)
+       for (int i = 0; i < sizeof(*ip6) / sizeof(__u16); ++i)
+               sum6 += ~((__u16 *)ip6)[i];  // note the bitwise negation
+
+       // Note that there is no L4 checksum update: we are relying on the checksum neutrality
+       // of the ipv6 address chosen by netd's ClatdController.
+
+       // Packet mutations begin - point of no return, but if this first modification fails
+       // the packet is probably still pristine, so let clatd handle it.
+       if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IP), 0))
+               return TC_ACT_OK;
+       bpf_csum_update(skb, sum6);
+
+       data = (void *)(long)skb->data;
+       data_end = (void *)(long)skb->data_end;
+       if (data + l2_header_size + sizeof(struct iphdr) > data_end)
+               return TC_ACT_SHOT;
+
+       struct ethhdr *new_eth = data;
+
+       // Copy over the updated ethernet header
+       *new_eth = eth2;
+
+       // Copy over the new ipv4 header.
+       *(struct iphdr *)(new_eth + 1) = ip;
+       return bpf_redirect(skb->ifindex, BPF_F_INGRESS);
+}
+
+SEC("schedcls/egress4/snat4")
+int sched_cls_egress4_snat4_prog(struct __sk_buff *skb)
+{
+       const int l2_header_size =  sizeof(struct ethhdr);
+       void *data = (void *)(long)skb->data;
+       const void *data_end = (void *)(long)skb->data_end;
+       const struct ethhdr *const eth = data;  // used iff is_ethernet
+       const struct iphdr *const ip4 = (void *)(eth + 1);
+
+       // Must be meta-ethernet IPv4 frame
+       if (skb->protocol != bpf_htons(ETH_P_IP))
+               return TC_ACT_OK;
+
+       // Must have ipv4 header
+       if (data + l2_header_size + sizeof(struct ipv6hdr) > data_end)
+               return TC_ACT_OK;
+
+       // Ethertype - if present - must be IPv4
+       if (eth->h_proto != bpf_htons(ETH_P_IP))
+               return TC_ACT_OK;
+
+       // IP version must be 4
+       if (ip4->version != 4)
+               return TC_ACT_OK;
+
+       // We cannot handle IP options, just standard 20 byte == 5 dword minimal IPv4 header
+       if (ip4->ihl != 5)
+               return TC_ACT_OK;
+
+       // Maximum IPv6 payload length that can be translated to IPv4
+       if (bpf_htons(ip4->tot_len) > 0xFFFF - sizeof(struct ipv6hdr))
+               return TC_ACT_OK;
+
+       // Calculate the IPv4 one's complement checksum of the IPv4 header.
+       __wsum sum4 = 0;
+
+       for (int i = 0; i < sizeof(*ip4) / sizeof(__u16); ++i)
+               sum4 += ((__u16 *)ip4)[i];
+
+       // Note that sum4 is guaranteed to be non-zero by virtue of ip4->version == 4
+       sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse u32 into range 1 .. 0x1FFFE
+       sum4 = (sum4 & 0xFFFF) + (sum4 >> 16);  // collapse any potential carry into u16
+       // for a correct checksum we should get *a* zero, but sum4 must be positive, ie 0xFFFF
+       if (sum4 != 0xFFFF)
+               return TC_ACT_OK;
+
+       // Minimum IPv4 total length is the size of the header
+       if (bpf_ntohs(ip4->tot_len) < sizeof(*ip4))
+               return TC_ACT_OK;
+
+       // We are incapable of dealing with IPv4 fragments
+       if (ip4->frag_off & ~bpf_htons(IP_DF))
+               return TC_ACT_OK;
+
+       switch (ip4->protocol) {
+       case IPPROTO_TCP:  // For TCP & UDP the checksum neutrality of the chosen IPv6
+       case IPPROTO_GRE:  // address means there is no need to update their checksums.
+       case IPPROTO_ESP:  // We do not need to bother looking at GRE/ESP headers,
+               break;         // since there is never a checksum to update.
+
+       case IPPROTO_UDP:  // See above comment, but must also have UDP header...
+               if (data + sizeof(*ip4) + sizeof(struct udphdr) > data_end)
+                       return TC_ACT_OK;
+               const struct udphdr *uh = (const struct udphdr *)(ip4 + 1);
+               // If IPv4/UDP checksum is 0 then fallback to clatd so it can calculate the
+               // checksum.  Otherwise the network or more likely the NAT64 gateway might
+               // drop the packet because in most cases IPv6/UDP packets with a zero checksum
+               // are invalid. See RFC 6935.  TODO: calculate checksum via bpf_csum_diff()
+               if (!uh->check)
+                       return TC_ACT_OK;
+               break;
+
+       default:  // do not know how to handle anything else
+               return TC_ACT_OK;
+       }
+       struct ethhdr eth2;  // used iff is_ethernet
+
+       eth2 = *eth;                     // Copy over the ethernet header (src/dst mac)
+       eth2.h_proto = bpf_htons(ETH_P_IPV6);  // But replace the ethertype
+
+       struct ipv6hdr ip6 = {
+               .version = 6,                                    // __u8:4
+               .priority = ip4->tos >> 4,                       // __u8:4
+               .flow_lbl = {(ip4->tos & 0xF) << 4, 0, 0},       // __u8[3]
+               .payload_len = bpf_htons(bpf_ntohs(ip4->tot_len) - 20),  // __be16
+               .nexthdr = ip4->protocol,                        // __u8
+               .hop_limit = ip4->ttl,                           // __u8
+       };
+       ip6.saddr.in6_u.u6_addr32[0] = bpf_htonl(0x20010db8);
+       ip6.saddr.in6_u.u6_addr32[1] = 0;
+       ip6.saddr.in6_u.u6_addr32[2] = 0;
+       ip6.saddr.in6_u.u6_addr32[3] = bpf_htonl(1);
+       ip6.daddr.in6_u.u6_addr32[0] = bpf_htonl(0x20010db8);
+       ip6.daddr.in6_u.u6_addr32[1] = 0;
+       ip6.daddr.in6_u.u6_addr32[2] = 0;
+       ip6.daddr.in6_u.u6_addr32[3] = bpf_htonl(2);
+
+       // Calculate the IPv6 16-bit one's complement checksum of the IPv6 header.
+       __wsum sum6 = 0;
+       // We'll end up with a non-zero sum due to ip6.version == 6
+       for (int i = 0; i < sizeof(ip6) / sizeof(__u16); ++i)
+               sum6 += ((__u16 *)&ip6)[i];
+
+       // Packet mutations begin - point of no return, but if this first modification fails
+       // the packet is probably still pristine, so let clatd handle it.
+       if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0))
+               return TC_ACT_OK;
+
+       // This takes care of updating the skb->csum field for a CHECKSUM_COMPLETE packet.
+       // In such a case, skb->csum is a 16-bit one's complement sum of the entire payload,
+       // thus we need to subtract out the ipv4 header's sum, and add in the ipv6 header's sum.
+       // However, we've already verified the ipv4 checksum is correct and thus 0.
+       // Thus we only need to add the ipv6 header's sum.
+       //
+       // bpf_csum_update() always succeeds if the skb is CHECKSUM_COMPLETE and returns an error
+       // (-ENOTSUPP) if it isn't.  So we just ignore the return code (see above for more details).
+       bpf_csum_update(skb, sum6);
+
+       // bpf_skb_change_proto() invalidates all pointers - reload them.
+       data = (void *)(long)skb->data;
+       data_end = (void *)(long)skb->data_end;
+
+       // I cannot think of any valid way for this error condition to trigger, however I do
+       // believe the explicit check is required to keep the in kernel ebpf verifier happy.
+       if (data + l2_header_size + sizeof(ip6) > data_end)
+               return TC_ACT_SHOT;
+
+       struct ethhdr *new_eth = data;
+
+       // Copy over the updated ethernet header
+       *new_eth = eth2;
+       // Copy over the new ipv4 header.
+       *(struct ipv6hdr *)(new_eth + 1) = ip6;
+       return TC_ACT_OK;
+}
+
+char _license[] SEC("license") = ("GPL");
index 47c4d4b..54701c8 100755 (executable)
@@ -810,10 +810,16 @@ ipv4_ping()
        setup
        set_sysctl net.ipv4.raw_l3mdev_accept=1 2>/dev/null
        ipv4_ping_novrf
+       setup
+       set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+       ipv4_ping_novrf
 
        log_subsection "With VRF"
        setup "yes"
        ipv4_ping_vrf
+       setup "yes"
+       set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+       ipv4_ping_vrf
 }
 
 ################################################################################
@@ -2348,10 +2354,16 @@ ipv6_ping()
        log_subsection "No VRF"
        setup
        ipv6_ping_novrf
+       setup
+       set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+       ipv6_ping_novrf
 
        log_subsection "With VRF"
        setup "yes"
        ipv6_ping_vrf
+       setup "yes"
+       set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+       ipv6_ping_vrf
 }
 
 ################################################################################
diff --git a/tools/testing/selftests/net/udpgro_frglist.sh b/tools/testing/selftests/net/udpgro_frglist.sh
new file mode 100755 (executable)
index 0000000..807b74c
--- /dev/null
@@ -0,0 +1,101 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Run a series of udpgro benchmarks
+
+readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
+
+cleanup() {
+       local -r jobs="$(jobs -p)"
+       local -r ns="$(ip netns list|grep $PEER_NS)"
+
+       [ -n "${jobs}" ] && kill -INT ${jobs} 2>/dev/null
+       [ -n "$ns" ] && ip netns del $ns 2>/dev/null
+}
+trap cleanup EXIT
+
+run_one() {
+       # use 'rx' as separator between sender args and receiver args
+       local -r all="$@"
+       local -r tx_args=${all%rx*}
+       local rx_args=${all#*rx}
+
+
+
+       ip netns add "${PEER_NS}"
+       ip -netns "${PEER_NS}" link set lo up
+       ip link add type veth
+       ip link set dev veth0 up
+       ip addr add dev veth0 192.168.1.2/24
+       ip addr add dev veth0 2001:db8::2/64 nodad
+
+       ip link set dev veth1 netns "${PEER_NS}"
+       ip -netns "${PEER_NS}" addr add dev veth1 192.168.1.1/24
+       ip -netns "${PEER_NS}" addr add dev veth1 2001:db8::1/64 nodad
+       ip -netns "${PEER_NS}" link set dev veth1 up
+       ip netns exec "${PEER_NS}" ethtool -K veth1 rx-gro-list on
+
+
+       ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp_dummy
+       tc -n "${PEER_NS}" qdisc add dev veth1 clsact
+       tc -n "${PEER_NS}" filter add dev veth1 ingress prio 4 protocol ipv6 bpf object-file ../bpf/nat6to4.o section schedcls/ingress6/nat_6  direct-action
+       tc -n "${PEER_NS}" filter add dev veth1 egress prio 4 protocol ip bpf object-file ../bpf/nat6to4.o section schedcls/egress4/snat4 direct-action
+        echo ${rx_args}
+       ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -r &
+
+       # Hack: let bg programs complete the startup
+       sleep 0.1
+       ./udpgso_bench_tx ${tx_args}
+}
+
+run_in_netns() {
+       local -r args=$@
+  echo ${args}
+       ./in_netns.sh $0 __subprocess ${args}
+}
+
+run_udp() {
+       local -r args=$@
+
+       echo "udp gso - over veth touching data"
+       run_in_netns ${args} -u -S 0 rx -4 -v
+
+       echo "udp gso and gro - over veth touching data"
+       run_in_netns ${args} -S 0 rx -4 -G
+}
+
+run_tcp() {
+       local -r args=$@
+
+       echo "tcp - over veth touching data"
+       run_in_netns ${args} -t rx -4 -t
+}
+
+run_all() {
+       local -r core_args="-l 4"
+       local -r ipv4_args="${core_args} -4  -D 192.168.1.1"
+       local -r ipv6_args="${core_args} -6  -D 2001:db8::1"
+
+       echo "ipv6"
+       run_tcp "${ipv6_args}"
+       run_udp "${ipv6_args}"
+}
+
+if [ ! -f ../bpf/xdp_dummy.o ]; then
+       echo "Missing xdp_dummy helper. Build bpf selftest first"
+       exit -1
+fi
+
+if [ ! -f bpf/nat6to4.o ]; then
+       echo "Missing nat6to4 helper. Build bpfnat6to4.o selftest first"
+       exit -1
+fi
+
+if [[ $# -eq 0 ]]; then
+       run_all
+elif [[ $1 == "__subprocess" ]]; then
+       shift
+       run_one $@
+else
+       run_in_netns $@
+fi
index 04a49e8..5b1ecd0 100644 (file)
@@ -57,9 +57,9 @@ CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_32bit_prog
 CAN_BUILD_X86_64 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_64bit_program.c)
 CAN_BUILD_WITH_NOPIE := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_program.c -no-pie)
 
-TARGETS := protection_keys
-BINARIES_32 := $(TARGETS:%=%_32)
-BINARIES_64 := $(TARGETS:%=%_64)
+VMTARGETS := protection_keys
+BINARIES_32 := $(VMTARGETS:%=%_32)
+BINARIES_64 := $(VMTARGETS:%=%_64)
 
 ifeq ($(CAN_BUILD_WITH_NOPIE),1)
 CFLAGS += -no-pie
@@ -112,7 +112,7 @@ $(BINARIES_32): CFLAGS += -m32 -mxsave
 $(BINARIES_32): LDLIBS += -lrt -ldl -lm
 $(BINARIES_32): $(OUTPUT)/%_32: %.c
        $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
-$(foreach t,$(TARGETS),$(eval $(call gen-target-rule-32,$(t))))
+$(foreach t,$(VMTARGETS),$(eval $(call gen-target-rule-32,$(t))))
 endif
 
 ifeq ($(CAN_BUILD_X86_64),1)
@@ -120,7 +120,7 @@ $(BINARIES_64): CFLAGS += -m64 -mxsave
 $(BINARIES_64): LDLIBS += -lrt -ldl
 $(BINARIES_64): $(OUTPUT)/%_64: %.c
        $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
-$(foreach t,$(TARGETS),$(eval $(call gen-target-rule-64,$(t))))
+$(foreach t,$(VMTARGETS),$(eval $(call gen-target-rule-64,$(t))))
 endif
 
 # x86_64 users should be encouraged to install 32-bit libraries