Merge tag 'zonefs-5.10-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 11 Dec 2020 22:22:42 +0000 (14:22 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 11 Dec 2020 22:22:42 +0000 (14:22 -0800)
Pull zonefs fix from Damien Le Moal:
 "A single patch in this pull request to fix a BIO and page reference
  leak when writing sequential zone files"

* tag 'zonefs-5.10-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/zonefs:
  zonefs: fix page reference and BIO leak
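
For context on the fix above: a synchronous direct write typically pins user pages via bio_iov_iter_get_pages() and holds a reference on the BIO itself, and both references must be dropped on every exit path or they leak. The sketch below shows that general cleanup pattern against the v5.10 block-layer API; the function name and parameters are illustrative assumptions, and this is not the actual zonefs patch contained in this pull.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

/*
 * Hypothetical helper illustrating the cleanup pattern only; the function
 * name, parameters and surrounding context are assumptions, not the zonefs
 * code touched by this pull request.
 */
static ssize_t example_sync_zone_write(struct block_device *bdev,
				       sector_t sector, struct iov_iter *from,
				       unsigned int nr_pages)
{
	struct bio *bio;
	ssize_t size = 0;
	int ret;

	bio = bio_alloc(GFP_NOFS, nr_pages);
	if (!bio)
		return -ENOMEM;

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;

	/* Pins a reference on every user page added to the bio. */
	ret = bio_iov_iter_get_pages(bio, from);
	if (ret)
		goto out_put_bio;

	size = bio->bi_iter.bi_size;
	ret = submit_bio_wait(bio);

	/* Drop the page references taken by bio_iov_iter_get_pages(). */
	bio_release_pages(bio, false);
out_put_bio:
	/* Drop the bio's own reference on every exit path. */
	bio_put(bio);
	return ret ? ret : size;
}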

195 files changed:
MAINTAINERS
arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi
arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts
arch/arm/boot/dts/sun7i-a20-bananapi.dts
arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts
arch/arm/boot/dts/sun8i-s3-pinecube.dts
arch/arm/boot/dts/sun8i-v3s.dtsi
arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts
arch/arm/mach-imx/anatop.c
arch/arm/mach-keystone/memory.h
arch/arm/mach-sunxi/sunxi.c
arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts
arch/powerpc/mm/Makefile
arch/powerpc/mm/maccess.c [new file with mode: 0644]
arch/sparc/lib/csum_copy.S
drivers/clk/imx/Kconfig
drivers/clk/renesas/r9a06g032-clocks.c
drivers/firmware/xilinx/zynqmp.c
drivers/gpio/gpio-arizona.c
drivers/gpio/gpio-dwapb.c
drivers/gpio/gpio-eic-sprd.c
drivers/gpio/gpio-mvebu.c
drivers/gpio/gpio-zynq.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/pm/inc/smu10.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_mocs.c
drivers/gpu/drm/i915/gt/shmem_utils.c
drivers/gpu/drm/i915/selftests/i915_gem.c
drivers/infiniband/core/cache.c
drivers/infiniband/core/cm.c
drivers/infiniband/hw/efa/efa_verbs.c
drivers/infiniband/hw/qedr/verbs.c
drivers/iommu/amd/amd_iommu_types.h
drivers/media/cec/usb/pulse8/pulse8-cec.c
drivers/media/common/videobuf2/videobuf2-core.c
drivers/media/rc/mtk-cir.c
drivers/media/test-drivers/vidtv/vidtv_channel.c
drivers/media/test-drivers/vidtv/vidtv_psi.h
drivers/media/test-drivers/vidtv/vidtv_s302m.c
drivers/media/test-drivers/vidtv/vidtv_ts.h
drivers/net/bonding/bond_options.c
drivers/net/can/softing/softing_main.c
drivers/net/dsa/ocelot/felix.c
drivers/net/dsa/ocelot/felix_vsc9959.c
drivers/net/dsa/ocelot/seville_vsc9953.c
drivers/net/ethernet/agere/Kconfig
drivers/net/ethernet/cadence/Kconfig
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
drivers/net/ethernet/faraday/Kconfig
drivers/net/ethernet/freescale/Kconfig
drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
drivers/net/ethernet/freescale/enetc/enetc_hw.h
drivers/net/ethernet/freescale/fman/Kconfig
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/prestera/prestera_main.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
drivers/net/ethernet/microchip/Kconfig
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot_vsc7514.c
drivers/net/ethernet/netronome/Kconfig
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/nxp/Kconfig
drivers/net/ethernet/rocker/Kconfig
drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/cpsw_priv.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/geneve.c
drivers/net/ipa/gsi_trans.c
drivers/net/netdevsim/bpf.c
drivers/net/netdevsim/netdevsim.h
drivers/net/vrf.c
drivers/pinctrl/aspeed/pinctrl-aspeed.c
drivers/pinctrl/aspeed/pinmux-aspeed.h
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/intel/pinctrl-jasperlake.c
drivers/pinctrl/intel/pinctrl-merrifield.c
drivers/pinctrl/pinctrl-amd.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
fs/afs/super.c
fs/nfs/Kconfig
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/nfs42proc.c
fs/nfs/nfs42xdr.c
fs/nfs/nfs4file.c
fs/nfs/nfs4proc.c
fs/nfs/pagelist.c
fs/proc/task_mmu.c
fs/seq_file.c
include/linux/build_bug.h
include/linux/elfcore.h
include/linux/netfilter/x_tables.h
include/linux/nfs_page.h
include/linux/security.h
include/linux/stmmac.h
include/net/bonding.h
include/net/netfilter/nf_tables.h
include/net/xdp.h
include/soc/mscc/ocelot.h
include/uapi/linux/bpf.h
init/initramfs.c
kernel/Makefile
kernel/bpf/helpers.c
kernel/bpf/verifier.c
kernel/elfcore.c [deleted file]
kernel/trace/bpf_trace.c
kernel/trace/trace.c
lib/Makefile
mm/filemap.c
mm/hugetlb.c
mm/kasan/quarantine.c
mm/madvise.c
net/bridge/br_device.c
net/bridge/br_multicast.c
net/bridge/br_private.h
net/bridge/br_vlan.c
net/can/isotp.c
net/core/dev.c
net/core/flow_offload.c
net/core/lwt_bpf.c
net/core/xdp.c
net/ethtool/bitset.c
net/ipv4/fib_frontend.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/tcp_ipv6.c
net/mac80211/iface.c
net/mac80211/mesh_pathtbl.c
net/mac80211/util.c
net/mptcp/mib.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_ct.c
net/netfilter/nft_dynset.c
net/netfilter/x_tables.c
net/openvswitch/flow_netlink.c
net/sched/cls_flower.c
net/sched/sch_fq_pie.c
net/tipc/node.c
net/wireless/nl80211.c
net/xdp/xsk.c
net/xdp/xsk_buff_pool.c
net/xdp/xsk_queue.h
net/xfrm/xfrm_compat.c
net/xfrm/xfrm_state.c
tools/bpf/bpftool/pids.c
tools/include/uapi/linux/bpf.h
tools/lib/bpf/ringbuf.c
tools/testing/ktest/ktest.pl
tools/testing/selftests/bpf/prog_tests/align.c
tools/testing/selftests/bpf/prog_tests/ringbuf.c
tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
tools/testing/selftests/bpf/test_offload.py
tools/testing/selftests/bpf/verifier/array_access.c
tools/testing/selftests/bpf/verifier/bounds.c
tools/testing/selftests/net/fcnal-test.sh
tools/testing/selftests/net/udpgso_bench_rx.c

index 6f47415..281de21 100644 (file)
@@ -1486,10 +1486,20 @@ F:      Documentation/devicetree/bindings/iommu/arm,smmu*
 F:     drivers/iommu/arm/
 F:     drivers/iommu/io-pgtable-arm*
 
+ARM AND ARM64 SoC SUB-ARCHITECTURES (COMMON PARTS)
+M:     Arnd Bergmann <arnd@arndb.de>
+M:     Olof Johansson <olof@lixom.net>
+M:     soc@kernel.org
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git
+F:     arch/arm/boot/dts/Makefile
+F:     arch/arm64/boot/dts/Makefile
+
 ARM SUB-ARCHITECTURES
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git
 F:     arch/arm/mach-*/
 F:     arch/arm/plat-*/
 
@@ -3237,7 +3247,7 @@ R:        Martin KaFai Lau <kafai@fb.com>
 R:     Song Liu <songliubraving@fb.com>
 R:     Yonghong Song <yhs@fb.com>
 R:     John Fastabend <john.fastabend@gmail.com>
-R:     KP Singh <kpsingh@chromium.org>
+R:     KP Singh <kpsingh@kernel.org>
 L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
 S:     Supported
@@ -3356,7 +3366,7 @@ F:        arch/x86/net/
 X:     arch/x86/net/bpf_jit_comp32.c
 
 BPF LSM (Security Audit and Enforcement using BPF)
-M:     KP Singh <kpsingh@chromium.org>
+M:     KP Singh <kpsingh@kernel.org>
 R:     Florent Revest <revest@chromium.org>
 R:     Brendan Jackman <jackmanb@chromium.org>
 L:     bpf@vger.kernel.org
@@ -10553,6 +10563,13 @@ S:     Supported
 F:     Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
 F:     drivers/net/ethernet/marvell/octeontx2/af/
 
+MARVELL PRESTERA ETHERNET SWITCH DRIVER
+M:     Vadym Kochan <vkochan@marvell.com>
+M:     Taras Chornyi <tchornyi@marvell.com>
+S:     Supported
+W:     https://github.com/Marvell-switching/switchdev-prestera
+F:     drivers/net/ethernet/marvell/prestera/
+
 MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER
 M:     Nicolas Pitre <nico@fluxnic.net>
 S:     Odd Fixes
index 265f5f3..24f793c 100644 (file)
 
        pinctrl_i2c3: i2c3grp {
                fsl,pins = <
-                       MX6QDL_PAD_GPIO_3__I2C3_SCL             0x4001b8b1
+                       MX6QDL_PAD_GPIO_5__I2C3_SCL             0x4001b8b1
                        MX6QDL_PAD_GPIO_16__I2C3_SDA            0x4001b8b1
                >;
        };
index 9390979..b9b698f 100644 (file)
                                MX6QDL_PAD_RGMII_RD2__RGMII_RD2         0x1b030
                                MX6QDL_PAD_RGMII_RD3__RGMII_RD3         0x1b030
                                MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL   0x1b030
-                               MX6QDL_PAD_GPIO_6__ENET_IRQ             0x000b1
                        >;
                };
 
index adde62d..342304f 100644 (file)
 };
 
 &ssp3 {
-       /delete-property/ #address-cells;
-       /delete-property/ #size-cells;
+       #address-cells = <0>;
        spi-slave;
        status = "okay";
        ready-gpios = <&gpio 125 GPIO_ACTIVE_HIGH>;
index bb3987e..0b3d9ae 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&gmac_rgmii_pins>;
        phy-handle = <&phy1>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
        phy-supply = <&reg_gmac_3v3>;
        status = "okay";
 };
index fce2f7f..bf38c66 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Adam Sampson <ats@offog.org>
+ * Copyright 2015-2020 Adam Sampson <ats@offog.org>
  *
  * This file is dual-licensed: you can use it either under the terms
  * of the GPL or the X11 license, at your option. Note that this dual
        pinctrl-names = "default";
        pinctrl-0 = <&gmac_rgmii_pins>;
        phy-handle = <&phy1>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
        status = "okay";
 };
 
index 9bab6b7..4aa0ee8 100644 (file)
@@ -10,7 +10,7 @@
 
 / {
        model = "PineCube IP Camera";
-       compatible = "pine64,pinecube", "allwinner,sun8i-s3";
+       compatible = "pine64,pinecube", "sochip,s3", "allwinner,sun8i-v3";
 
        aliases {
                serial0 = &uart2;
index 0c73416..89abd4c 100644 (file)
                gic: interrupt-controller@1c81000 {
                        compatible = "arm,gic-400";
                        reg = <0x01c81000 0x1000>,
-                             <0x01c82000 0x1000>,
+                             <0x01c82000 0x2000>,
                              <0x01c84000 0x2000>,
                              <0x01c86000 0x2000>;
                        interrupt-controller;
index 15c22b0..4795455 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&gmac_rgmii_pins>;
        phy-handle = <&phy1>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
        phy-supply = <&reg_dc1sw>;
        status = "okay";
 };
 };
 
 &reg_dc1sw {
-       regulator-min-microvolt = <3000000>;
-       regulator-max-microvolt = <3000000>;
+       regulator-min-microvolt = <3300000>;
+       regulator-max-microvolt = <3300000>;
        regulator-name = "vcc-gmac-phy";
 };
 
 &reg_dcdc1 {
        regulator-always-on;
-       regulator-min-microvolt = <3000000>;
-       regulator-max-microvolt = <3000000>;
-       regulator-name = "vcc-3v0";
+       regulator-min-microvolt = <3300000>;
+       regulator-max-microvolt = <3300000>;
+       regulator-name = "vcc-3v3";
 };
 
 &reg_dcdc2 {
index d841bed..7bb47eb 100644 (file)
@@ -136,7 +136,7 @@ void __init imx_init_revision_from_anatop(void)
 
                        src_np = of_find_compatible_node(NULL, NULL,
                                                     "fsl,imx6ul-src");
-                       src_base = of_iomap(np, 0);
+                       src_base = of_iomap(src_np, 0);
                        of_node_put(src_np);
                        WARN_ON(!src_base);
                        sbmr2 = readl_relaxed(src_base + SRC_SBMR2);
index 9147565..1b9ed12 100644 (file)
@@ -6,9 +6,6 @@
 #ifndef __MEMORY_H
 #define __MEMORY_H
 
-#define MAX_PHYSMEM_BITS       36
-#define SECTION_SIZE_BITS      34
-
 #define KEYSTONE_LOW_PHYS_START                0x80000000ULL
 #define KEYSTONE_LOW_PHYS_SIZE         0x80000000ULL /* 2G */
 #define KEYSTONE_LOW_PHYS_END          (KEYSTONE_LOW_PHYS_START + \
index 06da274..1963572 100644 (file)
@@ -66,6 +66,7 @@ static const char * const sun8i_board_dt_compat[] = {
        "allwinner,sun8i-h2-plus",
        "allwinner,sun8i-h3",
        "allwinner,sun8i-r40",
+       "allwinner,sun8i-v3",
        "allwinner,sun8i-v3s",
        NULL,
 };
index 9ebb9e0..d406974 100644 (file)
@@ -79,7 +79,7 @@
 &emac {
        pinctrl-names = "default";
        pinctrl-0 = <&rgmii_pins>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
        phy-handle = <&ext_rgmii_phy>;
        phy-supply = <&reg_dc1sw>;
        status = "okay";
index 4f9ba53..9d93fe1 100644 (file)
@@ -96,7 +96,7 @@
        pinctrl-0 = <&emac_rgmii_pins>;
        phy-supply = <&reg_gmac_3v3>;
        phy-handle = <&ext_rgmii_phy>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
        status = "okay";
 };
 
index fceb298..29a081e 100644 (file)
@@ -27,7 +27,7 @@
 &emac {
        pinctrl-names = "default";
        pinctrl-0 = <&ext_rgmii_pins>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
        phy-handle = <&ext_rgmii_phy>;
        phy-supply = <&reg_gmac_3v3>;
        allwinner,rx-delay-ps = <200>;
index 5e14798..55b4a8b 100644 (file)
@@ -5,7 +5,7 @@
 
 ccflags-$(CONFIG_PPC64)        := $(NO_MINIMAL_TOC)
 
-obj-y                          := fault.o mem.o pgtable.o mmap.o \
+obj-y                          := fault.o mem.o pgtable.o mmap.o maccess.o \
                                   init_$(BITS).o pgtable_$(BITS).o \
                                   pgtable-frag.o ioremap.o ioremap_$(BITS).o \
                                   init-common.o mmu_context.o drmem.o
diff --git a/arch/powerpc/mm/maccess.c b/arch/powerpc/mm/maccess.c
new file mode 100644 (file)
index 0000000..fa9a7a7
--- /dev/null
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+{
+       return is_kernel_addr((unsigned long)unsafe_src);
+}
index 0c0268e..d839956 100644 (file)
@@ -71,7 +71,7 @@
 FUNC_NAME:             /* %o0=src, %o1=dst, %o2=len */
        LOAD(prefetch, %o0 + 0x000, #n_reads)
        xor             %o0, %o1, %g1
-       mov             1, %o3
+       mov             -1, %o3
        clr             %o4
        andcc           %g1, 0x3, %g0
        bne,pn          %icc, 95f
index 3b393cb..3061896 100644 (file)
@@ -5,8 +5,8 @@ config MXC_CLK
        depends on ARCH_MXC || COMPILE_TEST
 
 config MXC_CLK_SCU
-       tristate "IMX SCU clock"
-       depends on ARCH_MXC || COMPILE_TEST
+       tristate
+       depends on ARCH_MXC
        depends on IMX_SCU && HAVE_ARM_SMCCC
 
 config CLK_IMX1
index d900f6b..892e91b 100644 (file)
@@ -55,7 +55,7 @@ struct r9a06g032_clkdesc {
                        u16 sel, g1, r1, g2, r2;
                } dual;
        };
-} __packed;
+};
 
 #define I_GATE(_clk, _rst, _rdy, _midle, _scon, _mirack, _mistat) \
        { .gate = _clk, .reset = _rst, \
index d08ac82..fd95ede 100644 (file)
@@ -29,7 +29,7 @@
 #define PM_API_FEATURE_CHECK_MAX_ORDER  7
 
 static bool feature_check_enabled;
-DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
+static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
 
 /**
  * struct pm_api_feature_data - PM API Feature data
index 5bda38e..2bc173c 100644 (file)
@@ -192,6 +192,7 @@ static int arizona_gpio_probe(struct platform_device *pdev)
        ret = devm_gpiochip_add_data(&pdev->dev, &arizona_gpio->gpio_chip,
                                     arizona_gpio);
        if (ret < 0) {
+               pm_runtime_disable(&pdev->dev);
                dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
                        ret);
                return ret;
index 2a9046c..4275c18 100644 (file)
@@ -724,6 +724,8 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
                        return err;
        }
 
+       platform_set_drvdata(pdev, gpio);
+
        return 0;
 }
 
index ad61daf..865ab2b 100644 (file)
@@ -598,7 +598,7 @@ static int sprd_eic_probe(struct platform_device *pdev)
                 */
                res = platform_get_resource(pdev, IORESOURCE_MEM, i);
                if (!res)
-                       continue;
+                       break;
 
                sprd_eic->base[i] = devm_ioremap_resource(&pdev->dev, res);
                if (IS_ERR(sprd_eic->base[i]))
index 433e2c3..2f24559 100644 (file)
@@ -1197,6 +1197,13 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
 
        devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip);
 
+       /* Some MVEBU SoCs have simple PWM support for GPIO lines */
+       if (IS_ENABLED(CONFIG_PWM)) {
+               err = mvebu_pwm_probe(pdev, mvchip, id);
+               if (err)
+                       return err;
+       }
+
        /* Some gpio controllers do not provide irq support */
        if (!have_irqs)
                return 0;
@@ -1206,7 +1213,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
        if (!mvchip->domain) {
                dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
                        mvchip->chip.label);
-               return -ENODEV;
+               err = -ENODEV;
+               goto err_pwm;
        }
 
        err = irq_alloc_domain_generic_chips(
@@ -1254,14 +1262,12 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
                                                 mvchip);
        }
 
-       /* Some MVEBU SoCs have simple PWM support for GPIO lines */
-       if (IS_ENABLED(CONFIG_PWM))
-               return mvebu_pwm_probe(pdev, mvchip, id);
-
        return 0;
 
 err_domain:
        irq_domain_remove(mvchip->domain);
+err_pwm:
+       pwmchip_remove(&mvchip->mvpwm->chip);
 
        return err;
 }
index 0b5a17a..3521c1d 100644 (file)
@@ -574,7 +574,7 @@ static int zynq_gpio_irq_reqres(struct irq_data *d)
        struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
        int ret;
 
-       ret = pm_runtime_get_sync(chip->parent);
+       ret = pm_runtime_resume_and_get(chip->parent);
        if (ret < 0)
                return ret;
 
@@ -942,7 +942,7 @@ static int zynq_gpio_probe(struct platform_device *pdev)
 
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
-       ret = pm_runtime_get_sync(&pdev->dev);
+       ret = pm_runtime_resume_and_get(&pdev->dev);
        if (ret < 0)
                goto err_pm_dis;
 
index 089ddca..6e3c4d7 100644 (file)
@@ -1806,6 +1806,11 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_request);
  */
 void gpiochip_generic_free(struct gpio_chip *gc, unsigned offset)
 {
+#ifdef CONFIG_PINCTRL
+       if (list_empty(&gc->gpiodev->pin_ranges))
+               return;
+#endif
+
        pinctrl_gpio_free(gc->gpiodev->base + offset);
 }
 EXPORT_SYMBOL_GPL(gpiochip_generic_free);
index 9579349..1b56dbc 100644 (file)
@@ -459,6 +459,7 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_bo *bo;
        struct amdgpu_bo_param bp;
+       struct drm_gem_object *gobj;
        int ret;
 
        memset(&bp, 0, sizeof(bp));
@@ -469,17 +470,20 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
        bp.type = ttm_bo_type_sg;
        bp.resv = resv;
        dma_resv_lock(resv, NULL);
-       ret = amdgpu_bo_create(adev, &bp, &bo);
+       ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
+                       AMDGPU_GEM_DOMAIN_CPU,
+                       0, ttm_bo_type_sg, resv, &gobj);
        if (ret)
                goto error;
 
+       bo = gem_to_amdgpu_bo(gobj);
        bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
        bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
        if (dma_buf->ops != &amdgpu_dmabuf_ops)
                bo->prime_shared_count = 1;
 
        dma_resv_unlock(resv);
-       return &bo->tbo.base;
+       return gobj;
 
 error:
        dma_resv_unlock(resv);
index 7e8265d..e8c76bd 100644 (file)
@@ -66,26 +66,12 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
        bp.type = type;
        bp.resv = resv;
        bp.preferred_domain = initial_domain;
-retry:
        bp.flags = flags;
        bp.domain = initial_domain;
        r = amdgpu_bo_create(adev, &bp, &bo);
-       if (r) {
-               if (r != -ERESTARTSYS) {
-                       if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
-                               flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-                               goto retry;
-                       }
-
-                       if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
-                               initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
-                               goto retry;
-                       }
-                       DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
-                                 size, initial_domain, alignment, r);
-               }
+       if (r)
                return r;
-       }
+
        *obj = &bo->tbo.base;
 
        return 0;
@@ -225,7 +211,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
        uint64_t size = args->in.bo_size;
        struct dma_resv *resv = NULL;
        struct drm_gem_object *gobj;
-       uint32_t handle;
+       uint32_t handle, initial_domain;
        int r;
 
        /* reject invalid gem flags */
@@ -269,9 +255,28 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                resv = vm->root.base.bo->tbo.base.resv;
        }
 
+retry:
+       initial_domain = (u32)(0xffffffff & args->in.domains);
        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
-                                    (u32)(0xffffffff & args->in.domains),
+                                    initial_domain,
                                     flags, ttm_bo_type_device, resv, &gobj);
+       if (r) {
+               if (r != -ERESTARTSYS) {
+                       if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+                               flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+                               goto retry;
+                       }
+
+                       if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+                               initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+                               goto retry;
+                       }
+                       DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
+                                 size, initial_domain, args->in.alignment, r);
+               }
+               return r;
+       }
+
        if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
                if (!r) {
                        struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
index 36604d7..3e4892b 100644 (file)
@@ -499,6 +499,9 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
        else
                size = amdgpu_gmc_get_vbios_fb_size(adev);
 
+       if (adev->mman.keep_stolen_vga_memory)
+               size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
+
        /* set to 0 if the pre-OS buffer uses up most of vram */
        if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
                size = 0;
index 4e36551..82cd8e5 100644 (file)
@@ -1172,7 +1172,7 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
                        con->dir, &con->disable_ras_err_cnt_harvest);
 }
 
-void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
+static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
                struct ras_fs_if *head)
 {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -1194,7 +1194,6 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
 
 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
 {
-#if defined(CONFIG_DEBUG_FS)
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;
        struct ras_fs_if fs_info;
@@ -1203,7 +1202,7 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
         * it won't be called in resume path, no need to check
         * suspend and gpu reset status
         */
-       if (!con)
+       if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
                return;
 
        amdgpu_ras_debugfs_create_ctrl_node(adev);
@@ -1217,10 +1216,9 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
                        amdgpu_ras_debugfs_create(adev, &fs_info);
                }
        }
-#endif
 }
 
-void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
+static void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
                struct ras_common_if *head)
 {
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
@@ -1234,7 +1232,6 @@ void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
 
 static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
 {
-#if defined(CONFIG_DEBUG_FS)
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj, *tmp;
 
@@ -1243,7 +1240,6 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
        }
 
        con->dir = NULL;
-#endif
 }
 /* debugfs end */
 
@@ -1291,7 +1287,8 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
 
 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
 {
-       amdgpu_ras_debugfs_remove_all(adev);
+       if (IS_ENABLED(CONFIG_DEBUG_FS))
+               amdgpu_ras_debugfs_remove_all(adev);
        amdgpu_ras_sysfs_remove_all(adev);
        return 0;
 }
index 6b8d7bb..ec398ed 100644 (file)
@@ -607,14 +607,8 @@ int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
                struct ras_common_if *head);
 
-void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
-               struct ras_fs_if *head);
-
 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev);
 
-void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
-               struct ras_common_if *head);
-
 int amdgpu_ras_error_query(struct amdgpu_device *adev,
                struct ras_query_if *info);
 
index 9f39527..2a48505 100644 (file)
@@ -186,7 +186,7 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
                        if (err)
                                goto out;
 
-                       err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[0]);
+                       err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[i]);
                        if (err)
                                goto out;
                }
index 222f1df..8cc51ce 100644 (file)
@@ -1736,6 +1736,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
        }
 
        mutex_unlock(&p->mutex);
+       dma_buf_put(dmabuf);
 
        args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
 
@@ -1745,6 +1746,7 @@ err_free:
        amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL);
 err_unlock:
        mutex_unlock(&p->mutex);
+       dma_buf_put(dmabuf);
        return r;
 }
 
index 9b6809f..0f7749e 100644 (file)
@@ -1058,9 +1058,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                goto error;
        }
 
-       /* Update the actual used number of crtc */
-       adev->mode_info.num_crtc = adev->dm.display_indexes_num;
-
        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);
 
@@ -3251,6 +3248,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
        enum dc_connection_type new_connection_type = dc_connection_none;
        const struct dc_plane_cap *plane;
 
+       dm->display_indexes_num = dm->dc->caps.max_streams;
+       /* Update the actual used number of crtc */
+       adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+
        link_cnt = dm->dc->caps.max_links;
        if (amdgpu_dm_mode_config_init(dm->adev)) {
                DRM_ERROR("DM: Failed to initialize mode config\n");
@@ -3312,8 +3313,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                        goto fail;
                }
 
-       dm->display_indexes_num = dm->dc->caps.max_streams;
-
        /* loops over all connectors on the board */
        for (i = 0; i < link_cnt; i++) {
                struct dc_link *link = NULL;
index c001307..6b431db 100644 (file)
@@ -579,7 +579,7 @@ static struct clk_bw_params rn_bw_params = {
 
 };
 
-static struct wm_table ddr4_wm_table = {
+static struct wm_table ddr4_wm_table_gs = {
        .entries = {
                {
                        .wm_inst = WM_A,
@@ -616,7 +616,7 @@ static struct wm_table ddr4_wm_table = {
        }
 };
 
-static struct wm_table lpddr4_wm_table = {
+static struct wm_table lpddr4_wm_table_gs = {
        .entries = {
                {
                        .wm_inst = WM_A,
@@ -690,6 +690,80 @@ static struct wm_table lpddr4_wm_table_with_disabled_ppt = {
        }
 };
 
+static struct wm_table ddr4_wm_table_rn = {
+       .entries = {
+               {
+                       .wm_inst = WM_A,
+                       .wm_type = WM_TYPE_PSTATE_CHG,
+                       .pstate_latency_us = 11.72,
+                       .sr_exit_time_us = 9.09,
+                       .sr_enter_plus_exit_time_us = 10.14,
+                       .valid = true,
+               },
+               {
+                       .wm_inst = WM_B,
+                       .wm_type = WM_TYPE_PSTATE_CHG,
+                       .pstate_latency_us = 11.72,
+                       .sr_exit_time_us = 10.12,
+                       .sr_enter_plus_exit_time_us = 11.48,
+                       .valid = true,
+               },
+               {
+                       .wm_inst = WM_C,
+                       .wm_type = WM_TYPE_PSTATE_CHG,
+                       .pstate_latency_us = 11.72,
+                       .sr_exit_time_us = 10.12,
+                       .sr_enter_plus_exit_time_us = 11.48,
+                       .valid = true,
+               },
+               {
+                       .wm_inst = WM_D,
+                       .wm_type = WM_TYPE_PSTATE_CHG,
+                       .pstate_latency_us = 11.72,
+                       .sr_exit_time_us = 10.12,
+                       .sr_enter_plus_exit_time_us = 11.48,
+                       .valid = true,
+               },
+       }
+};
+
+static struct wm_table lpddr4_wm_table_rn = {
+       .entries = {
+               {
+                       .wm_inst = WM_A,
+                       .wm_type = WM_TYPE_PSTATE_CHG,
+                       .pstate_latency_us = 11.65333,
+                       .sr_exit_time_us = 7.32,
+                       .sr_enter_plus_exit_time_us = 8.38,
+                       .valid = true,
+               },
+               {
+                       .wm_inst = WM_B,
+                       .wm_type = WM_TYPE_PSTATE_CHG,
+                       .pstate_latency_us = 11.65333,
+                       .sr_exit_time_us = 9.82,
+                       .sr_enter_plus_exit_time_us = 11.196,
+                       .valid = true,
+               },
+               {
+                       .wm_inst = WM_C,
+                       .wm_type = WM_TYPE_PSTATE_CHG,
+                       .pstate_latency_us = 11.65333,
+                       .sr_exit_time_us = 9.89,
+                       .sr_enter_plus_exit_time_us = 11.24,
+                       .valid = true,
+               },
+               {
+                       .wm_inst = WM_D,
+                       .wm_type = WM_TYPE_PSTATE_CHG,
+                       .pstate_latency_us = 11.65333,
+                       .sr_exit_time_us = 9.748,
+                       .sr_enter_plus_exit_time_us = 11.102,
+                       .valid = true,
+               },
+       }
+};
+
 static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
 {
        int i;
@@ -771,6 +845,11 @@ void rn_clk_mgr_construct(
        struct dc_debug_options *debug = &ctx->dc->debug;
        struct dpm_clocks clock_table = { 0 };
        enum pp_smu_status status = 0;
+       int is_green_sardine = 0;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+       is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev);
+#endif
 
        clk_mgr->base.ctx = ctx;
        clk_mgr->base.funcs = &dcn21_funcs;
@@ -811,10 +890,16 @@ void rn_clk_mgr_construct(
                        if (clk_mgr->periodic_retraining_disabled) {
                                rn_bw_params.wm_table = lpddr4_wm_table_with_disabled_ppt;
                        } else {
-                               rn_bw_params.wm_table = lpddr4_wm_table;
+                               if (is_green_sardine)
+                                       rn_bw_params.wm_table = lpddr4_wm_table_gs;
+                               else
+                                       rn_bw_params.wm_table = lpddr4_wm_table_rn;
                        }
                } else {
-                       rn_bw_params.wm_table = ddr4_wm_table;
+                       if (is_green_sardine)
+                               rn_bw_params.wm_table = ddr4_wm_table_gs;
+                       else
+                               rn_bw_params.wm_table = ddr4_wm_table_rn;
                }
                /* Saved clocks configured at boot for debug purposes */
                rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
index fec87a2..5b0cedf 100644 (file)
@@ -3394,10 +3394,13 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
 {
        uint32_t bits_per_channel = 0;
        uint32_t kbps;
+       struct fixed31_32 link_bw_kbps;
 
        if (timing->flags.DSC) {
-               kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel);
-               kbps = kbps / 160 + ((kbps % 160) ? 1 : 0);
+               link_bw_kbps = dc_fixpt_from_int(timing->pix_clk_100hz);
+               link_bw_kbps = dc_fixpt_div_int(link_bw_kbps, 160);
+               link_bw_kbps = dc_fixpt_mul_int(link_bw_kbps, timing->dsc_cfg.bits_per_pixel);
+               kbps = dc_fixpt_ceil(link_bw_kbps);
                return kbps;
        }
 
index b965205..9e837a5 100644 (file)
 #define FEATURE_CORE_CSTATES_MASK     (1 << FEATURE_CORE_CSTATES_BIT)
 
 /* Workload bits */
-#define WORKLOAD_DEFAULT_BIT              0
-#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1
-#define WORKLOAD_PPLIB_POWER_SAVING_BIT   2
-#define WORKLOAD_PPLIB_VIDEO_BIT          3
-#define WORKLOAD_PPLIB_VR_BIT             4
-#define WORKLOAD_PPLIB_COMPUTE_BIT        5
-#define WORKLOAD_PPLIB_CUSTOM_BIT         6
-#define WORKLOAD_PPLIB_COUNT              7
+#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0
+#define WORKLOAD_PPLIB_VIDEO_BIT          2
+#define WORKLOAD_PPLIB_VR_BIT             3
+#define WORKLOAD_PPLIB_COMPUTE_BIT        4
+#define WORKLOAD_PPLIB_CUSTOM_BIT         5
+#define WORKLOAD_PPLIB_COUNT              6
 
 typedef struct {
        /* MP1_EXT_SCRATCH0 */
index 719597c..6606511 100644 (file)
@@ -24,6 +24,8 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/pci.h>
+
 #include <drm/amdgpu_drm.h>
 #include "processpptables.h"
 #include <atom-types.h>
@@ -984,6 +986,8 @@ static int init_thermal_controller(
                        struct pp_hwmgr *hwmgr,
                        const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
 {
+       struct amdgpu_device *adev = hwmgr->adev;
+
        hwmgr->thermal_controller.ucType =
                        powerplay_table->sThermalController.ucType;
        hwmgr->thermal_controller.ucI2cLine =
@@ -1008,7 +1012,104 @@ static int init_thermal_controller(
                   ATOM_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
                   PHM_PlatformCaps_ThermalController);
 
-       hwmgr->thermal_controller.use_hw_fan_control = 1;
+        if (powerplay_table->usTableSize >= sizeof(ATOM_PPLIB_POWERPLAYTABLE3)) {
+               const ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3 =
+                       (const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table;
+
+               if (0 == le16_to_cpu(powerplay_table3->usFanTableOffset)) {
+                       hwmgr->thermal_controller.use_hw_fan_control = 1;
+                       return 0;
+               } else {
+                       const ATOM_PPLIB_FANTABLE *fan_table =
+                               (const ATOM_PPLIB_FANTABLE *)(((unsigned long)powerplay_table) +
+                                                             le16_to_cpu(powerplay_table3->usFanTableOffset));
+
+                       if (1 <= fan_table->ucFanTableFormat) {
+                               hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst =
+                                       fan_table->ucTHyst;
+                               hwmgr->thermal_controller.advanceFanControlParameters.usTMin =
+                                       le16_to_cpu(fan_table->usTMin);
+                               hwmgr->thermal_controller.advanceFanControlParameters.usTMed =
+                                       le16_to_cpu(fan_table->usTMed);
+                               hwmgr->thermal_controller.advanceFanControlParameters.usTHigh =
+                                       le16_to_cpu(fan_table->usTHigh);
+                               hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin =
+                                       le16_to_cpu(fan_table->usPWMMin);
+                               hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed =
+                                       le16_to_cpu(fan_table->usPWMMed);
+                               hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh =
+                                       le16_to_cpu(fan_table->usPWMHigh);
+                               hwmgr->thermal_controller.advanceFanControlParameters.usTMax = 10900;
+                               hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay = 100000;
+
+                               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                           PHM_PlatformCaps_MicrocodeFanControl);
+                       }
+
+                       if (2 <= fan_table->ucFanTableFormat) {
+                               const ATOM_PPLIB_FANTABLE2 *fan_table2 =
+                                       (const ATOM_PPLIB_FANTABLE2 *)(((unsigned long)powerplay_table) +
+                                                                      le16_to_cpu(powerplay_table3->usFanTableOffset));
+                               hwmgr->thermal_controller.advanceFanControlParameters.usTMax =
+                                       le16_to_cpu(fan_table2->usTMax);
+                       }
+
+                       if (3 <= fan_table->ucFanTableFormat) {
+                               const ATOM_PPLIB_FANTABLE3 *fan_table3 =
+                                       (const ATOM_PPLIB_FANTABLE3 *) (((unsigned long)powerplay_table) +
+                                                                       le16_to_cpu(powerplay_table3->usFanTableOffset));
+
+                               hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode =
+                                       fan_table3->ucFanControlMode;
+
+                               if ((3 == fan_table->ucFanTableFormat) &&
+                                   (0x67B1 == adev->pdev->device))
+                                       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM =
+                                               47;
+                               else
+                                       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM =
+                                               le16_to_cpu(fan_table3->usFanPWMMax);
+
+                               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity =
+                                       4836;
+                               hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+                                       le16_to_cpu(fan_table3->usFanOutputSensitivity);
+                       }
+
+                       if (6 <= fan_table->ucFanTableFormat) {
+                               const ATOM_PPLIB_FANTABLE4 *fan_table4 =
+                                       (const ATOM_PPLIB_FANTABLE4 *)(((unsigned long)powerplay_table) +
+                                                                      le16_to_cpu(powerplay_table3->usFanTableOffset));
+
+                               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                           PHM_PlatformCaps_FanSpeedInTableIsRPM);
+
+                               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM =
+                                       le16_to_cpu(fan_table4->usFanRPMMax);
+                       }
+
+                       if (7 <= fan_table->ucFanTableFormat) {
+                               const ATOM_PPLIB_FANTABLE5 *fan_table5 =
+                                       (const ATOM_PPLIB_FANTABLE5 *)(((unsigned long)powerplay_table) +
+                                                                      le16_to_cpu(powerplay_table3->usFanTableOffset));
+
+                               if (0x67A2 == adev->pdev->device ||
+                                   0x67A9 == adev->pdev->device ||
+                                   0x67B9 == adev->pdev->device) {
+                                       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                                   PHM_PlatformCaps_GeminiRegulatorFanControlSupport);
+                                       hwmgr->thermal_controller.advanceFanControlParameters.usFanCurrentLow =
+                                               le16_to_cpu(fan_table5->usFanCurrentLow);
+                                       hwmgr->thermal_controller.advanceFanControlParameters.usFanCurrentHigh =
+                                               le16_to_cpu(fan_table5->usFanCurrentHigh);
+                                       hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMLow =
+                                               le16_to_cpu(fan_table5->usFanRPMLow);
+                                       hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMHigh =
+                                               le16_to_cpu(fan_table5->usFanRPMHigh);
+                               }
+                       }
+               }
+       }
 
        return 0;
 }
index cf60f39..e6f40ee 100644 (file)
@@ -1297,15 +1297,9 @@ static int conv_power_profile_to_pplib_workload(int power_profile)
        int pplib_workload = 0;
 
        switch (power_profile) {
-       case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
-               pplib_workload = WORKLOAD_DEFAULT_BIT;
-               break;
        case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
                pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
                break;
-       case PP_SMC_POWER_PROFILE_POWERSAVING:
-               pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
-               break;
        case PP_SMC_POWER_PROFILE_VIDEO:
                pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
                break;
@@ -1315,6 +1309,9 @@ static int conv_power_profile_to_pplib_workload(int power_profile)
        case PP_SMC_POWER_PROFILE_COMPUTE:
                pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
                break;
+       case PP_SMC_POWER_PROFILE_CUSTOM:
+               pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
+               break;
        }
 
        return pplib_workload;
index 895d89b..cf7c4f0 100644 (file)
@@ -217,7 +217,7 @@ static struct cmn2asic_mapping sienna_cichlid_workload_map[PP_SMC_POWER_PROFILE_
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,          WORKLOAD_PPLIB_POWER_SAVING_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,                WORKLOAD_PPLIB_VIDEO_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,                   WORKLOAD_PPLIB_VR_BIT),
-       WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,              WORKLOAD_PPLIB_CUSTOM_BIT),
+       WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,              WORKLOAD_PPLIB_COMPUTE_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,               WORKLOAD_PPLIB_CUSTOM_BIT),
 };
 
index 3bfe6ed..aabf09f 100644 (file)
@@ -18040,7 +18040,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
         */
        ret = intel_initial_commit(&i915->drm);
        if (ret)
-               return ret;
+               drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
 
        intel_overlay_setup(i915);
 
index bf1e9cf..9bc59fd 100644 (file)
@@ -573,7 +573,7 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
                return 0;
        }
        /* Also take into account max slice width */
-       min_slice_count = min_t(u8, min_slice_count,
+       min_slice_count = max_t(u8, min_slice_count,
                                DIV_ROUND_UP(mode_hdisplay,
                                             max_slice_width));
 
index 1904e6e..b07dc11 100644 (file)
@@ -3097,7 +3097,7 @@ static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
                        break;
 }
 
-static void eb_request_add(struct i915_execbuffer *eb)
+static int eb_request_add(struct i915_execbuffer *eb, int err)
 {
        struct i915_request *rq = eb->request;
        struct intel_timeline * const tl = i915_request_timeline(rq);
@@ -3118,6 +3118,7 @@ static void eb_request_add(struct i915_execbuffer *eb)
                /* Serialise with context_close via the add_to_timeline */
                i915_request_set_error_once(rq, -ENOENT);
                __i915_request_skip(rq);
+               err = -ENOENT; /* override any transient errors */
        }
 
        __i915_request_queue(rq, &attr);
@@ -3127,6 +3128,8 @@ static void eb_request_add(struct i915_execbuffer *eb)
                retire_requests(tl, prev);
 
        mutex_unlock(&tl->mutex);
+
+       return err;
 }
 
 static const i915_user_extension_fn execbuf_extensions[] = {
@@ -3332,7 +3335,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
        err = eb_submit(&eb, batch);
 err_request:
        i915_request_get(eb.request);
-       eb_request_add(&eb);
+       err = eb_request_add(&eb, err);
 
        if (eb.fences)
                signal_fence_array(&eb);
index 0952bf1..724b2cb 100644 (file)
@@ -2788,6 +2788,9 @@ static void __execlists_hold(struct i915_request *rq)
 static bool execlists_hold(struct intel_engine_cs *engine,
                           struct i915_request *rq)
 {
+       if (i915_request_on_hold(rq))
+               return false;
+
        spin_lock_irq(&engine->active.lock);
 
        if (i915_request_completed(rq)) { /* too late! */
@@ -3169,8 +3172,10 @@ static void execlists_submission_tasklet(unsigned long data)
                spin_unlock_irqrestore(&engine->active.lock, flags);
 
                /* Recheck after serialising with direct-submission */
-               if (unlikely(timeout && preempt_timeout(engine)))
+               if (unlikely(timeout && preempt_timeout(engine))) {
+                       cancel_timer(&engine->execlists.preempt);
                        execlists_reset(engine, "preemption time out");
+               }
        }
 }
 
index 4f74706..413dadf 100644 (file)
@@ -59,8 +59,7 @@ struct drm_i915_mocs_table {
 #define _L3_CACHEABILITY(value)        ((value) << 4)
 
 /* Helper defines */
-#define GEN9_NUM_MOCS_ENTRIES  62  /* 62 out of 64 - 63 & 64 are reserved. */
-#define GEN11_NUM_MOCS_ENTRIES 64  /* 63-64 are reserved, but configured. */
+#define GEN9_NUM_MOCS_ENTRIES  64  /* 63-64 are reserved, but configured. */
 
 /* (e)LLC caching options */
 /*
@@ -328,11 +327,11 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
        if (INTEL_GEN(i915) >= 12) {
                table->size  = ARRAY_SIZE(tgl_mocs_table);
                table->table = tgl_mocs_table;
-               table->n_entries = GEN11_NUM_MOCS_ENTRIES;
+               table->n_entries = GEN9_NUM_MOCS_ENTRIES;
        } else if (IS_GEN(i915, 11)) {
                table->size  = ARRAY_SIZE(icl_mocs_table);
                table->table = icl_mocs_table;
-               table->n_entries = GEN11_NUM_MOCS_ENTRIES;
+               table->n_entries = GEN9_NUM_MOCS_ENTRIES;
        } else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) {
                table->size  = ARRAY_SIZE(skl_mocs_table);
                table->n_entries = GEN9_NUM_MOCS_ENTRIES;
index 463af67..5982b62 100644 (file)
@@ -73,7 +73,7 @@ void *shmem_pin_map(struct file *file)
        mapping_set_unevictable(file->f_mapping);
        return vaddr;
 err_page:
-       while (--i >= 0)
+       while (i--)
                put_page(pages[i]);
        kvfree(pages);
        return NULL;
index 23a6132..412e216 100644 (file)
@@ -211,8 +211,8 @@ static int igt_gem_ww_ctx(void *arg)
                return PTR_ERR(obj);
 
        obj2 = i915_gem_object_create_internal(i915, PAGE_SIZE);
-       if (IS_ERR(obj)) {
-               err = PTR_ERR(obj);
+       if (IS_ERR(obj2)) {
+               err = PTR_ERR(obj2);
                goto put1;
        }
 
index 8017c40..7989b7e 100644 (file)
@@ -1269,9 +1269,6 @@ ssize_t rdma_query_gid_table(struct ib_device *device,
        unsigned long flags;
 
        rdma_for_each_port(device, port_num) {
-               if (!rdma_ib_or_roce(device, port_num))
-                       continue;
-
                table = rdma_gid_table(device, port_num);
                read_lock_irqsave(&table->rwlock, flags);
                for (i = 0; i < table->sz; i++) {
index 0121566..5afd142 100644 (file)
@@ -1522,6 +1522,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
+               cm_id_priv->timewait_info = NULL;
                goto out;
        }
 
@@ -2114,6 +2115,7 @@ static int cm_req_handler(struct cm_work *work)
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
+               cm_id_priv->timewait_info = NULL;
                goto destroy;
        }
        cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
index 191e084..4e940fc 100644 (file)
@@ -940,8 +940,8 @@ int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
                        1);
                EFA_SET(&params.modify_mask,
                        EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1);
-               params.cur_qp_state = qp_attr->cur_qp_state;
-               params.qp_state = qp_attr->qp_state;
+               params.cur_qp_state = cur_state;
+               params.qp_state = new_state;
        }
 
        if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
index 019642f..511c95b 100644 (file)
@@ -1936,6 +1936,15 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
        }
 
        if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+               qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
+
+               /* calculate the db_rec_db2 data since it is constant so no
+                * need to reflect from user
+                */
+               qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
+               qp->urq.db_rec_db2_data.data.value =
+                       cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
+
                rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
                                          &qp->urq.db_rec_db2_data,
                                          DB_REC_WIDTH_32B,
index 8964770..494b42a 100644 (file)
 #define DTE_IRQ_REMAP_INTCTL_MASK      (0x3ULL << 60)
 #define DTE_IRQ_TABLE_LEN_MASK (0xfULL << 1)
 #define DTE_IRQ_REMAP_INTCTL    (2ULL << 60)
-#define DTE_IRQ_TABLE_LEN       (8ULL << 1)
+#define DTE_IRQ_TABLE_LEN       (9ULL << 1)
 #define DTE_IRQ_REMAP_ENABLE    1ULL
 
 #define PAGE_MODE_NONE    0x00
index e4d8446..04b13cd 100644 (file)
@@ -88,13 +88,15 @@ enum pulse8_msgcodes {
        MSGCODE_SET_PHYSICAL_ADDRESS,   /* 0x20 */
        MSGCODE_GET_DEVICE_TYPE,
        MSGCODE_SET_DEVICE_TYPE,
-       MSGCODE_GET_HDMI_VERSION,
+       MSGCODE_GET_HDMI_VERSION,       /* Removed in FW >= 10 */
        MSGCODE_SET_HDMI_VERSION,
        MSGCODE_GET_OSD_NAME,
        MSGCODE_SET_OSD_NAME,
        MSGCODE_WRITE_EEPROM,
        MSGCODE_GET_ADAPTER_TYPE,       /* 0x28 */
        MSGCODE_SET_ACTIVE_SOURCE,
+       MSGCODE_GET_AUTO_POWER_ON,      /* New for FW >= 10 */
+       MSGCODE_SET_AUTO_POWER_ON,
 
        MSGCODE_FRAME_EOM = 0x80,
        MSGCODE_FRAME_ACK = 0x40,
@@ -143,6 +145,8 @@ static const char * const pulse8_msgnames[] = {
        "WRITE_EEPROM",
        "GET_ADAPTER_TYPE",
        "SET_ACTIVE_SOURCE",
+       "GET_AUTO_POWER_ON",
+       "SET_AUTO_POWER_ON",
 };
 
 static const char *pulse8_msgname(u8 cmd)
@@ -579,12 +583,14 @@ static int pulse8_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
        if (err)
                goto unlock;
 
-       cmd[0] = MSGCODE_SET_HDMI_VERSION;
-       cmd[1] = adap->log_addrs.cec_version;
-       err = pulse8_send_and_wait(pulse8, cmd, 2,
-                                  MSGCODE_COMMAND_ACCEPTED, 0);
-       if (err)
-               goto unlock;
+       if (pulse8->vers < 10) {
+               cmd[0] = MSGCODE_SET_HDMI_VERSION;
+               cmd[1] = adap->log_addrs.cec_version;
+               err = pulse8_send_and_wait(pulse8, cmd, 2,
+                                          MSGCODE_COMMAND_ACCEPTED, 0);
+               if (err)
+                       goto unlock;
+       }
 
        if (adap->log_addrs.osd_name[0]) {
                size_t osd_len = strlen(adap->log_addrs.osd_name);
@@ -650,7 +656,6 @@ static void pulse8_disconnect(struct serio *serio)
        struct pulse8 *pulse8 = serio_get_drvdata(serio);
 
        cec_unregister_adapter(pulse8->adap);
-       pulse8->serio = NULL;
        serio_set_drvdata(serio, NULL);
        serio_close(serio);
 }
@@ -692,6 +697,14 @@ static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
        dev_dbg(pulse8->dev, "Autonomous mode: %s",
                data[0] ? "on" : "off");
 
+       if (pulse8->vers >= 10) {
+               cmd[0] = MSGCODE_GET_AUTO_POWER_ON;
+               err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+               if (!err)
+                       dev_dbg(pulse8->dev, "Auto Power On: %s",
+                               data[0] ? "on" : "off");
+       }
+
        cmd[0] = MSGCODE_GET_DEVICE_TYPE;
        err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
        if (err)
@@ -753,12 +766,15 @@ static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
        dev_dbg(pulse8->dev, "Physical address: %x.%x.%x.%x\n",
                cec_phys_addr_exp(*pa));
 
-       cmd[0] = MSGCODE_GET_HDMI_VERSION;
-       err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
-       if (err)
-               return err;
-       log_addrs->cec_version = data[0];
-       dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
+       log_addrs->cec_version = CEC_OP_CEC_VERSION_1_4;
+       if (pulse8->vers < 10) {
+               cmd[0] = MSGCODE_GET_HDMI_VERSION;
+               err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+               if (err)
+                       return err;
+               log_addrs->cec_version = data[0];
+               dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
+       }
 
        cmd[0] = MSGCODE_GET_OSD_NAME;
        err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 0);
@@ -830,8 +846,10 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
        pulse8->adap = cec_allocate_adapter(&pulse8_cec_adap_ops, pulse8,
                                            dev_name(&serio->dev), caps, 1);
        err = PTR_ERR_OR_ZERO(pulse8->adap);
-       if (err < 0)
-               goto free_device;
+       if (err < 0) {
+               kfree(pulse8);
+               return err;
+       }
 
        pulse8->dev = &serio->dev;
        serio_set_drvdata(serio, pulse8);
@@ -874,8 +892,6 @@ close_serio:
        serio_close(serio);
 delete_adap:
        cec_delete_adapter(pulse8->adap);
-free_device:
-       kfree(pulse8);
        return err;
 }
 
index 4eab6d8..89e3839 100644 (file)
@@ -414,6 +414,17 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
                vb->index = q->num_buffers + buffer;
                vb->type = q->type;
                vb->memory = memory;
+               /*
+                * We need to set these flags here so that the videobuf2 core
+                * will call ->prepare()/->finish() cache sync/flush on vb2
+                * buffers when appropriate. However, we can avoid explicit
+                * ->prepare() and ->finish() cache sync for DMABUF buffers,
+                * because the DMA exporter takes care of it.
+                */
+               if (q->memory != VB2_MEMORY_DMABUF) {
+                       vb->need_cache_sync_on_prepare = 1;
+                       vb->need_cache_sync_on_finish = 1;
+               }
                for (plane = 0; plane < num_planes; ++plane) {
                        vb->planes[plane].length = plane_sizes[plane];
                        vb->planes[plane].min_length = plane_sizes[plane];
index 5051a5e..65a136c 100644 (file)
@@ -151,15 +151,12 @@ static inline u32 mtk_chk_period(struct mtk_ir *ir)
 {
        u32 val;
 
-       /* Period of raw software sampling in ns */
-       val = DIV_ROUND_CLOSEST(1000000000ul,
-                               clk_get_rate(ir->bus) / ir->data->div);
-
        /*
         * Period for software decoder used in the
         * unit of raw software sampling
         */
-       val = DIV_ROUND_CLOSEST(MTK_IR_SAMPLE, val);
+       val = DIV_ROUND_CLOSEST(clk_get_rate(ir->bus),
+                               USEC_PER_SEC * ir->data->div / MTK_IR_SAMPLE);
 
        dev_dbg(ir->dev, "@pwm clk  = \t%lu\n",
                clk_get_rate(ir->bus) / ir->data->div);
@@ -412,7 +409,7 @@ static int mtk_ir_probe(struct platform_device *pdev)
        mtk_irq_enable(ir, MTK_IRINT_EN);
 
        dev_info(dev, "Initialized MT7623 IR driver, sample period = %dus\n",
-                DIV_ROUND_CLOSEST(MTK_IR_SAMPLE, 1000));
+                MTK_IR_SAMPLE);
 
        return 0;
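
A note on the mtk-cir arithmetic above: both versions of mtk_chk_period() compute the number of divided-bus-clock cycles per software sample period, but the old code rounded the clock period to whole nanoseconds first while the new one divides the clock rate in a single step (and, per the probe hunk, the sample period macro now appears to be expressed in microseconds rather than nanoseconds). The intermediate rounding can shift the result for some clock rates. A standalone sketch with made-up numbers (not the driver's real clock rate, divider or MTK_IR_SAMPLE value) showing the two routes side by side:

    #include <stdio.h>

    /* Round-to-nearest division for positive operands, like the kernel's
     * DIV_ROUND_CLOSEST().
     */
    static unsigned long div_round_closest(unsigned long n, unsigned long d)
    {
        return (n + d / 2) / d;
    }

    int main(void)
    {
        /* Hypothetical values, for illustration only. */
        unsigned long clk_rate = 136000000UL;  /* divided bus clock, Hz */
        unsigned long div = 1;                 /* hypothetical divider */
        unsigned long sample_us = 10;          /* hypothetical sample period */

        /* Old route: round the clock period to whole ns, then divide. */
        unsigned long period_ns = div_round_closest(1000000000UL,
                                                    clk_rate / div);
        unsigned long old_val = div_round_closest(sample_us * 1000, period_ns);

        /* New route: divide the clock rate directly, no intermediate ns. */
        unsigned long new_val = div_round_closest(clk_rate,
                                                  1000000UL * div / sample_us);

        printf("old=%lu new=%lu exact=%.2f\n", old_val, new_val,
               (double)clk_rate * sample_us / (1e6 * div));
        return 0;
    }
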
 
index 8ad6c07..7838e62 100644 (file)
@@ -504,11 +504,11 @@ void vidtv_channel_si_destroy(struct vidtv_mux *m)
 {
        u32 i;
 
-       vidtv_psi_pat_table_destroy(m->si.pat);
-
        for (i = 0; i < m->si.pat->num_pmt; ++i)
                vidtv_psi_pmt_table_destroy(m->si.pmt_secs[i]);
 
+       vidtv_psi_pat_table_destroy(m->si.pat);
+
        kfree(m->si.pmt_secs);
        vidtv_psi_sdt_table_destroy(m->si.sdt);
        vidtv_psi_nit_table_destroy(m->si.nit);
index 340c9fb..fdc825e 100644 (file)
@@ -420,7 +420,7 @@ void vidtv_psi_desc_assign(struct vidtv_psi_desc **to,
                           struct vidtv_psi_desc *desc);
 
 /**
- * vidtv_psi_pmt_desc_assign - Assigns a descriptor loop at some point in a PMT section.
+ * vidtv_pmt_desc_assign - Assigns a descriptor loop at some point in a PMT section.
  * @pmt: The PMT section that will contain the descriptor loop
  * @to: Where in the PMT to assign this descriptor loop to
  * @desc: The descriptor loop that will be assigned.
@@ -434,7 +434,7 @@ void vidtv_pmt_desc_assign(struct vidtv_psi_table_pmt *pmt,
                           struct vidtv_psi_desc *desc);
 
 /**
- * vidtv_psi_sdt_desc_assign - Assigns a descriptor loop at some point in a SDT.
+ * vidtv_sdt_desc_assign - Assigns a descriptor loop at some point in a SDT.
  * @sdt: The SDT that will contain the descriptor loop
  * @to: Where in the SDT to assign this descriptor loop to
  * @desc: The descriptor loop that will be assigned.
@@ -474,7 +474,7 @@ void vidtv_psi_pmt_stream_assign(struct vidtv_psi_table_pmt *pmt,
 struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc);
 
 /**
- * vidtv_psi_create_sec_for_each_pat_entry - Create a PMT section for each
+ * vidtv_psi_pmt_create_sec_for_each_pat_entry - Create a PMT section for each
  * program found in the PAT
  * @pat: The PAT to look for programs.
  * @pcr_pid: packet ID for the PCR to be used for the program described in this
@@ -743,7 +743,7 @@ struct vidtv_psi_table_eit {
 struct vidtv_psi_table_eit
 *vidtv_psi_eit_table_init(u16 network_id,
                          u16 transport_stream_id,
-                         u16 service_id);
+                         __be16 service_id);
 
 /**
  * struct vidtv_psi_eit_write_args - Arguments for writing an EIT section
index ce7dd6c..d79b658 100644 (file)
@@ -467,8 +467,10 @@ struct vidtv_encoder
        e->is_video_encoder = false;
 
        ctx = kzalloc(priv_sz, GFP_KERNEL);
-       if (!ctx)
+       if (!ctx) {
+               kfree(e);
                return NULL;
+       }
 
        e->ctx = ctx;
        ctx->last_duration = 0;
index 10838a2..f5e8e1f 100644 (file)
@@ -44,7 +44,7 @@ struct vidtv_mpeg_ts {
                u8 adaptation_field:1;
                u8 scrambling:2;
        } __packed;
-       struct vidtv_mpeg_ts_adaption adaption[];
+       struct vidtv_mpeg_ts_adaption *adaption;
 } __packed;
 
 /**
index 9abfaae..a4e4e15 100644 (file)
@@ -745,6 +745,19 @@ const struct bond_option *bond_opt_get(unsigned int option)
        return &bond_opts[option];
 }
 
+static void bond_set_xfrm_features(struct net_device *bond_dev, u64 mode)
+{
+       if (!IS_ENABLED(CONFIG_XFRM_OFFLOAD))
+               return;
+
+       if (mode == BOND_MODE_ACTIVEBACKUP)
+               bond_dev->wanted_features |= BOND_XFRM_FEATURES;
+       else
+               bond_dev->wanted_features &= ~BOND_XFRM_FEATURES;
+
+       netdev_update_features(bond_dev);
+}
+
 static int bond_option_mode_set(struct bonding *bond,
                                const struct bond_opt_value *newval)
 {
@@ -767,13 +780,8 @@ static int bond_option_mode_set(struct bonding *bond,
        if (newval->value == BOND_MODE_ALB)
                bond->params.tlb_dynamic_lb = 1;
 
-#ifdef CONFIG_XFRM_OFFLOAD
-       if (newval->value == BOND_MODE_ACTIVEBACKUP)
-               bond->dev->wanted_features |= BOND_XFRM_FEATURES;
-       else
-               bond->dev->wanted_features &= ~BOND_XFRM_FEATURES;
-       netdev_change_features(bond->dev);
-#endif /* CONFIG_XFRM_OFFLOAD */
+       if (bond->dev->reg_state == NETREG_REGISTERED)
+               bond_set_xfrm_features(bond->dev, newval->value);
 
        /* don't cache arp_validate between modes */
        bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
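
The bonding hunk above replaces an #ifdef CONFIG_XFRM_OFFLOAD block with an IS_ENABLED() check inside a small helper, and only applies the feature change once the device is registered. The appeal of IS_ENABLED() is that the code is always parsed and type-checked while the dead branch folds away when the option is off. A self-contained sketch of the idiom; the CONFIG_DEMO_OFFLOAD macro and the feature bit are invented for this example, and the kernel's real IS_ENABLED() from <linux/kconfig.h> also copes with undefined and =m options, which this stand-in does not:

    #include <stdio.h>

    /* Stand-ins for Kconfig-generated macros; in the kernel, IS_ENABLED()
     * evaluates to 0 or 1 at compile time.
     */
    #define CONFIG_DEMO_OFFLOAD 1
    #define IS_ENABLED(opt)     (opt)

    #define DEMO_XFRM_FEATURES  0x4UL

    static void set_xfrm_features(unsigned long *wanted, int active_backup)
    {
        if (!IS_ENABLED(CONFIG_DEMO_OFFLOAD))
            return;     /* always compiled, folds away when the option is off */

        if (active_backup)
            *wanted |= DEMO_XFRM_FEATURES;
        else
            *wanted &= ~DEMO_XFRM_FEATURES;
    }

    int main(void)
    {
        unsigned long wanted = 0;

        set_xfrm_features(&wanted, 1);
        printf("wanted=%#lx\n", wanted);    /* 0x4 */
        set_xfrm_features(&wanted, 0);
        printf("wanted=%#lx\n", wanted);    /* 0x0 */
        return 0;
    }
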
index 9d2faaa..c9ca8b9 100644 (file)
@@ -382,8 +382,13 @@ static int softing_netdev_open(struct net_device *ndev)
 
        /* check or determine and set bittime */
        ret = open_candev(ndev);
-       if (!ret)
-               ret = softing_startstop(ndev, 1);
+       if (ret)
+               return ret;
+
+       ret = softing_startstop(ndev, 1);
+       if (ret < 0)
+               close_candev(ndev);
+
        return ret;
 }
 
index f791860..c444ef3 100644 (file)
@@ -569,7 +569,6 @@ static int felix_setup(struct dsa_switch *ds)
        struct ocelot *ocelot = ds->priv;
        struct felix *felix = ocelot_to_felix(ocelot);
        int port, err;
-       int tc;
 
        err = felix_init_structs(felix, ds->num_ports);
        if (err)
@@ -608,12 +607,6 @@ static int felix_setup(struct dsa_switch *ds)
        ocelot_write_rix(ocelot,
                         ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
                         ANA_PGID_PGID, PGID_UC);
-       /* Setup the per-traffic class flooding PGIDs */
-       for (tc = 0; tc < FELIX_NUM_TC; tc++)
-               ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
-                                ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
-                                ANA_FLOODING_FLD_UNICAST(PGID_UC),
-                                ANA_FLOODING, tc);
 
        ds->mtu_enforcement_ingress = true;
        ds->configure_vlan_while_not_filtering = true;
index 3e925b8..2e5bbdc 100644 (file)
@@ -1429,6 +1429,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
        pci_set_drvdata(pdev, felix);
        ocelot = &felix->ocelot;
        ocelot->dev = &pdev->dev;
+       ocelot->num_flooding_pgids = FELIX_NUM_TC;
        felix->info = &felix_info_vsc9959;
        felix->switch_base = pci_resource_start(pdev,
                                                felix->info->switch_pci_bar);
index 1d420c4..ebbaf68 100644 (file)
@@ -1210,6 +1210,7 @@ static int seville_probe(struct platform_device *pdev)
 
        ocelot = &felix->ocelot;
        ocelot->dev = &pdev->dev;
+       ocelot->num_flooding_pgids = 1;
        felix->info = &seville_info_vsc9953;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index d92516a..9cd7501 100644 (file)
@@ -21,6 +21,7 @@ config ET131X
        tristate "Agere ET-1310 Gigabit Ethernet support"
        depends on PCI
        select PHYLIB
+       select CRC32
        help
          This driver supports Agere ET-1310 ethernet adapters.
 
index 8585816..e432a68 100644 (file)
@@ -23,6 +23,7 @@ config MACB
        tristate "Cadence MACB/GEM support"
        depends on HAS_DMA && COMMON_CLK
        select PHYLINK
+       select CRC32
        help
          The Cadence MACB ethernet interface is found on many Atmel AT32 and
          AT91 parts.  This driver also supports the Cadence GEM (Gigabit
index 7f90b82..1b7e8c9 100644 (file)
@@ -987,9 +987,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
        struct fw_eth_tx_pkt_wr *wr;
        struct cpl_tx_pkt_core *cpl;
        u32 ctrl, iplen, maclen;
-#if IS_ENABLED(CONFIG_IPV6)
        struct ipv6hdr *ip6;
-#endif
        unsigned int ndesc;
        struct tcphdr *tcp;
        int len16, pktlen;
@@ -1043,17 +1041,15 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
        cpl->len = htons(pktlen);
 
        memcpy(buf, skb->data, pktlen);
-       if (tx_info->ip_family == AF_INET) {
+       if (!IS_ENABLED(CONFIG_IPV6) || tx_info->ip_family == AF_INET) {
                /* we need to correct ip header len */
                ip = (struct iphdr *)(buf + maclen);
                ip->tot_len = htons(pktlen - maclen);
                cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP);
-#if IS_ENABLED(CONFIG_IPV6)
        } else {
                ip6 = (struct ipv6hdr *)(buf + maclen);
                ip6->payload_len = htons(pktlen - maclen - iplen);
                cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6);
-#endif
        }
 
        cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
index c2677ec..3d1e9a3 100644 (file)
@@ -33,6 +33,7 @@ config FTGMAC100
        depends on !64BIT || BROKEN
        select PHYLIB
        select MDIO_ASPEED if MACH_ASPEED_G6
+       select CRC32
        help
          This driver supports the FTGMAC100 Gigabit Ethernet controller
          from Faraday. It is used on Faraday A369, Andes AG102 and some
index a1d53dd..3f9175b 100644 (file)
@@ -25,6 +25,7 @@ config FEC
        depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
                   ARCH_MXC || SOC_IMX28 || COMPILE_TEST)
        default ARCH_MXC || SOC_IMX28 if ARM
+       select CRC32
        select PHYLIB
        imply PTP_1588_CLOCK
        help
index 90cd243..828c177 100644 (file)
@@ -269,6 +269,7 @@ static int dpaa2_pcs_create(struct dpaa2_mac *mac,
 
        if (!of_device_is_available(node)) {
                netdev_err(mac->net_dev, "pcs-handle node not available\n");
+               of_node_put(node);
                return -ENODEV;
        }
 
index 8ed1ebd..89e5581 100644 (file)
@@ -143,8 +143,8 @@ static const struct {
        { ENETC_PM0_R255,   "MAC rx 128-255 byte packets" },
        { ENETC_PM0_R511,   "MAC rx 256-511 byte packets" },
        { ENETC_PM0_R1023,  "MAC rx 512-1023 byte packets" },
-       { ENETC_PM0_R1518,  "MAC rx 1024-1518 byte packets" },
-       { ENETC_PM0_R1519X, "MAC rx 1519 to max-octet packets" },
+       { ENETC_PM0_R1522,  "MAC rx 1024-1522 byte packets" },
+       { ENETC_PM0_R1523X, "MAC rx 1523 to max-octet packets" },
        { ENETC_PM0_ROVR,   "MAC rx oversized packets" },
        { ENETC_PM0_RJBR,   "MAC rx jabber packets" },
        { ENETC_PM0_RFRG,   "MAC rx fragment packets" },
@@ -163,9 +163,13 @@ static const struct {
        { ENETC_PM0_TBCA,   "MAC tx broadcast frames" },
        { ENETC_PM0_TPKT,   "MAC tx packets" },
        { ENETC_PM0_TUND,   "MAC tx undersized packets" },
+       { ENETC_PM0_T64,    "MAC tx 64 byte packets" },
        { ENETC_PM0_T127,   "MAC tx 65-127 byte packets" },
+       { ENETC_PM0_T255,   "MAC tx 128-255 byte packets" },
+       { ENETC_PM0_T511,   "MAC tx 256-511 byte packets" },
        { ENETC_PM0_T1023,  "MAC tx 512-1023 byte packets" },
-       { ENETC_PM0_T1518,  "MAC tx 1024-1518 byte packets" },
+       { ENETC_PM0_T1522,  "MAC tx 1024-1522 byte packets" },
+       { ENETC_PM0_T1523X, "MAC tx 1523 to max-octet packets" },
        { ENETC_PM0_TCNP,   "MAC tx control packets" },
        { ENETC_PM0_TDFR,   "MAC tx deferred packets" },
        { ENETC_PM0_TMCOL,  "MAC tx multiple collisions" },
index eb6bbf1..4cbf166 100644 (file)
@@ -267,8 +267,8 @@ enum enetc_bdr_type {TX, RX};
 #define ENETC_PM0_R255         0x8180
 #define ENETC_PM0_R511         0x8188
 #define ENETC_PM0_R1023                0x8190
-#define ENETC_PM0_R1518                0x8198
-#define ENETC_PM0_R1519X       0x81A0
+#define ENETC_PM0_R1522                0x8198
+#define ENETC_PM0_R1523X       0x81A0
 #define ENETC_PM0_ROVR         0x81A8
 #define ENETC_PM0_RJBR         0x81B0
 #define ENETC_PM0_RFRG         0x81B8
@@ -287,9 +287,13 @@ enum enetc_bdr_type {TX, RX};
 #define ENETC_PM0_TBCA         0x8250
 #define ENETC_PM0_TPKT         0x8260
 #define ENETC_PM0_TUND         0x8268
+#define ENETC_PM0_T64          0x8270
 #define ENETC_PM0_T127         0x8278
+#define ENETC_PM0_T255         0x8280
+#define ENETC_PM0_T511         0x8288
 #define ENETC_PM0_T1023                0x8290
-#define ENETC_PM0_T1518                0x8298
+#define ENETC_PM0_T1522                0x8298
+#define ENETC_PM0_T1523X       0x82A0
 #define ENETC_PM0_TCNP         0x82C0
 #define ENETC_PM0_TDFR         0x82D0
 #define ENETC_PM0_TMCOL                0x82D8
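
On the 1518 to 1522 renames in the two enetc hunks above: 1518 bytes is the classic maximum untagged Ethernet frame (1500-byte payload + 14-byte header + 4-byte FCS), and 1522 adds one 4-byte VLAN tag, so the new names describe the bucket boundary the counters actually use. A tiny arithmetic check using only standard Ethernet constants (nothing taken from the driver itself):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        const int payload  = 1500;  /* standard Ethernet MTU */
        const int eth_hdr  = 14;    /* dst MAC + src MAC + EtherType */
        const int fcs      = 4;     /* frame check sequence */
        const int vlan_tag = 4;     /* one 802.1Q tag */

        assert(payload + eth_hdr + fcs == 1518);
        assert(payload + eth_hdr + fcs + vlan_tag == 1522);
        printf("untagged max %d bytes, single-tagged max %d bytes\n",
               1518, 1522);
        return 0;
    }
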
index 3415018..48bf808 100644 (file)
@@ -4,6 +4,7 @@ config FSL_FMAN
        depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
        select GENERIC_ALLOCATOR
        select PHYLIB
+       select CRC32
        default n
        help
                Freescale Data-Path Acceleration Architecture Frame Manager
index a9066e6..ca2ab6c 100644 (file)
@@ -35,8 +35,6 @@
 
 #define HCLGE_DBG_DFX_SSU_2_OFFSET 12
 
-#pragma pack(1)
-
 struct hclge_qos_pri_map_cmd {
        u8 pri0_tc  : 4,
           pri1_tc  : 4;
@@ -85,8 +83,6 @@ struct hclge_dbg_reg_type_info {
        struct hclge_dbg_reg_common_msg reg_msg;
 };
 
-#pragma pack()
-
 static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
        {false, "Reserved"},
        {true,  "BP_CPU_STATE"},
index b30f008..128ab68 100644 (file)
@@ -6475,13 +6475,13 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
 
        /* Ungate PGCB clock */
        mac_data = er32(FEXTNVM9);
-       mac_data |= BIT(28);
+       mac_data &= ~BIT(28);
        ew32(FEXTNVM9, mac_data);
 
        /* Enable K1 off to enable mPHY Power Gating */
        mac_data = er32(FEXTNVM6);
        mac_data |= BIT(31);
-       ew32(FEXTNVM12, mac_data);
+       ew32(FEXTNVM6, mac_data);
 
        /* Enable mPHY power gating for any link and speed */
        mac_data = er32(FEXTNVM8);
@@ -6525,11 +6525,11 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
        /* Disable K1 off */
        mac_data = er32(FEXTNVM6);
        mac_data &= ~BIT(31);
-       ew32(FEXTNVM12, mac_data);
+       ew32(FEXTNVM6, mac_data);
 
        /* Disable Ungate PGCB clock */
        mac_data = er32(FEXTNVM9);
-       mac_data &= ~BIT(28);
+       mac_data |= BIT(28);
        ew32(FEXTNVM9, mac_data);
 
        /* Cancel not waking from dynamic
index d43ce13..3f5825f 100644 (file)
@@ -1850,6 +1850,7 @@ static inline bool i40e_page_is_reusable(struct page *page)
  * the adapter for another receive
  *
  * @rx_buffer: buffer containing the page
+ * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
  *
  * If page is reusable, rx_buffer->page_offset is adjusted to point to
  * an unused region in the page.
@@ -1872,7 +1873,8 @@ static inline bool i40e_page_is_reusable(struct page *page)
  *
  * In either case, if the page is reusable its refcount is increased.
  **/
-static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
+                                  int rx_buffer_pgcnt)
 {
        unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
        struct page *page = rx_buffer->page;
@@ -1883,7 +1885,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
 
 #if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
-       if (unlikely((page_count(page) - pagecnt_bias) > 1))
+       if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
                return false;
 #else
 #define I40E_LAST_OFFSET \
@@ -1942,16 +1944,24 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
  * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
  * @rx_ring: rx descriptor ring to transact packets on
  * @size: size of buffer to add to skb
+ * @rx_buffer_pgcnt: buffer page refcount
  *
  * This function will pull an Rx buffer from the ring and synchronize it
  * for use by the CPU.
  */
 static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
-                                                const unsigned int size)
+                                                const unsigned int size,
+                                                int *rx_buffer_pgcnt)
 {
        struct i40e_rx_buffer *rx_buffer;
 
        rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
+       *rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+               page_count(rx_buffer->page);
+#else
+               0;
+#endif
        prefetch_page_address(rx_buffer->page);
 
        /* we are reusing so sync this buffer for CPU use */
@@ -2102,14 +2112,16 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
  * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
  * @rx_ring: rx descriptor ring to transact packets on
  * @rx_buffer: rx buffer to pull data from
+ * @rx_buffer_pgcnt: rx buffer page refcount pre xdp_do_redirect() call
  *
  * This function will clean up the contents of the rx_buffer.  It will
  * either recycle the buffer or unmap it and free the associated resources.
  */
 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
-                              struct i40e_rx_buffer *rx_buffer)
+                              struct i40e_rx_buffer *rx_buffer,
+                              int rx_buffer_pgcnt)
 {
-       if (i40e_can_reuse_rx_page(rx_buffer)) {
+       if (i40e_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
                /* hand second half of page back to the ring */
                i40e_reuse_rx_page(rx_ring, rx_buffer);
        } else {
@@ -2336,6 +2348,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
        while (likely(total_rx_packets < (unsigned int)budget)) {
                struct i40e_rx_buffer *rx_buffer;
                union i40e_rx_desc *rx_desc;
+               int rx_buffer_pgcnt;
                unsigned int size;
                u64 qword;
 
@@ -2378,7 +2391,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                        break;
 
                i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
-               rx_buffer = i40e_get_rx_buffer(rx_ring, size);
+               rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
 
                /* retrieve a buffer from the ring */
                if (!skb) {
@@ -2421,7 +2434,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                        break;
                }
 
-               i40e_put_rx_buffer(rx_ring, rx_buffer);
+               i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
                cleaned_count++;
 
                if (i40e_is_non_eop(rx_ring, rx_desc, skb))
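
The i40e change above, and the matching ice and ixgbe hunks that follow, share one pattern: snapshot the page's refcount before the XDP program gets a chance to call xdp_do_redirect(), then feed that snapshot into the reuse check instead of re-reading the live count afterwards, so the decision reflects the references that existed when the buffer was handed to XDP. A much-simplified userspace sketch of just the check (the struct fields and example counts are hypothetical, and the PAGE_SIZE-dependent branches are dropped):

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_rx_buffer {
        int pagecnt_bias;   /* references the driver pre-charged for itself */
    };

    /* Shape of the fixed check: the caller passes a page refcount snapshot
     * taken before the XDP program could call xdp_do_redirect(), instead of
     * the helper re-reading the live page_count() after XDP has run.
     */
    static bool can_reuse_rx_page(const struct demo_rx_buffer *buf,
                                  int refcount_snapshot)
    {
        /* sole owner at snapshot time -> safe to hand the half page back */
        return (refcount_snapshot - buf->pagecnt_bias) <= 1;
    }

    int main(void)
    {
        struct demo_rx_buffer buf = { .pagecnt_bias = 1 };

        /* only the driver's own bias existed -> reusable */
        printf("snapshot 1: reuse? %s\n",
               can_reuse_rx_page(&buf, 1) ? "yes" : "no");
        /* extra live references existed at snapshot time -> do not reuse */
        printf("snapshot 3: reuse? %s\n",
               can_reuse_rx_page(&buf, 3) ? "yes" : "no");
        return 0;
    }
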
index eae7526..23eca2f 100644 (file)
@@ -762,13 +762,15 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
 /**
  * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
  * @rx_buf: buffer containing the page
+ * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
  *
  * If page is reusable, we have a green light for calling ice_reuse_rx_page,
  * which will assign the current buffer to the buffer that next_to_alloc is
  * pointing to; otherwise, the DMA mapping needs to be destroyed and
  * page freed
  */
-static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
+static bool
+ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
 {
        unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
        struct page *page = rx_buf->page;
@@ -779,7 +781,7 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
 
 #if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
-       if (unlikely((page_count(page) - pagecnt_bias) > 1))
+       if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
                return false;
 #else
 #define ICE_LAST_OFFSET \
@@ -864,17 +866,24 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
  * @rx_ring: Rx descriptor ring to transact packets on
  * @skb: skb to be used
  * @size: size of buffer to add to skb
+ * @rx_buf_pgcnt: rx_buf page refcount
  *
  * This function will pull an Rx buffer from the ring and synchronize it
  * for use by the CPU.
  */
 static struct ice_rx_buf *
 ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
-              const unsigned int size)
+              const unsigned int size, int *rx_buf_pgcnt)
 {
        struct ice_rx_buf *rx_buf;
 
        rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
+       *rx_buf_pgcnt =
+#if (PAGE_SIZE < 8192)
+               page_count(rx_buf->page);
+#else
+               0;
+#endif
        prefetchw(rx_buf->page);
        *skb = rx_buf->skb;
 
@@ -1006,12 +1015,15 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
  * ice_put_rx_buf - Clean up used buffer and either recycle or free
  * @rx_ring: Rx descriptor ring to transact packets on
  * @rx_buf: Rx buffer to pull data from
+ * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
  *
  * This function will update next_to_clean and then clean up the contents
  * of the rx_buf. It will either recycle the buffer or unmap it and free
  * the associated resources.
  */
-static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+static void
+ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+              int rx_buf_pgcnt)
 {
        u16 ntc = rx_ring->next_to_clean + 1;
 
@@ -1022,7 +1034,7 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
        if (!rx_buf)
                return;
 
-       if (ice_can_reuse_rx_page(rx_buf)) {
+       if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
                /* hand second half of page back to the ring */
                ice_reuse_rx_page(rx_ring, rx_buf);
        } else {
@@ -1097,6 +1109,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
                struct sk_buff *skb;
                unsigned int size;
                u16 stat_err_bits;
+               int rx_buf_pgcnt;
                u16 vlan_tag = 0;
                u8 rx_ptype;
 
@@ -1119,7 +1132,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
                dma_rmb();
 
                if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
-                       ice_put_rx_buf(rx_ring, NULL);
+                       ice_put_rx_buf(rx_ring, NULL, 0);
                        cleaned_count++;
                        continue;
                }
@@ -1128,7 +1141,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
                        ICE_RX_FLX_DESC_PKT_LEN_M;
 
                /* retrieve a buffer from the ring */
-               rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
+               rx_buf = ice_get_rx_buf(rx_ring, &skb, size, &rx_buf_pgcnt);
 
                if (!size) {
                        xdp.data = NULL;
@@ -1168,7 +1181,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
                total_rx_pkts++;
 
                cleaned_count++;
-               ice_put_rx_buf(rx_ring, rx_buf);
+               ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
                continue;
 construct_skb:
                if (skb) {
@@ -1187,7 +1200,7 @@ construct_skb:
                        break;
                }
 
-               ice_put_rx_buf(rx_ring, rx_buf);
+               ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
                cleaned_count++;
 
                /* skip if it is NOP desc */
index 0286d2f..aaa954a 100644 (file)
@@ -138,6 +138,8 @@ struct vf_mac_filter {
 /* this is the size past which hardware will drop packets when setting LPE=0 */
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
 
+#define IGB_ETH_PKT_HDR_PAD    (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+
 /* Supported Rx Buffer Sizes */
 #define IGB_RXBUFFER_256       256
 #define IGB_RXBUFFER_1536      1536
@@ -247,6 +249,9 @@ enum igb_tx_flags {
 #define IGB_SFF_ADDRESSING_MODE                0x4
 #define IGB_SFF_8472_UNSUP             0x00
 
+/* TX resources are shared between XDP and netstack
+ * and we need to tag the buffer type to distinguish them
+ */
 enum igb_tx_buf_type {
        IGB_TYPE_SKB = 0,
        IGB_TYPE_XDP,
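
For the IGB_ETH_PKT_HDR_PAD definition above: with the standard values ETH_HLEN = 14, ETH_FCS_LEN = 4 and VLAN_HLEN = 4, the pad is 14 + 4 + 2*4 = 26 bytes (room for a double VLAN tag), so a 1500-byte MTU maps to a 1526-byte worst-case frame in the XDP buffer-size checks that use it. A quick standalone check of that arithmetic:

    #include <stdio.h>

    /* Standard Ethernet framing constants (same values as the kernel's
     * ETH_HLEN, ETH_FCS_LEN and VLAN_HLEN).
     */
    #define ETH_HLEN        14
    #define ETH_FCS_LEN     4
    #define VLAN_HLEN       4

    #define IGB_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

    int main(void)
    {
        int mtu = 1500;

        printf("pad = %d bytes, max frame for MTU %d = %d bytes\n",
               IGB_ETH_PKT_HDR_PAD, mtu, mtu + IGB_ETH_PKT_HDR_PAD);
        return 0;
    }
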
index 5fc2c38..0d343d0 100644 (file)
@@ -2824,20 +2824,25 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
        }
 }
 
-static int igb_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
+static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
 {
-       int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+       int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD;
        struct igb_adapter *adapter = netdev_priv(dev);
+       struct bpf_prog *prog = bpf->prog, *old_prog;
        bool running = netif_running(dev);
-       struct bpf_prog *old_prog;
        bool need_reset;
 
        /* verify igb ring attributes are sufficient for XDP */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = adapter->rx_ring[i];
 
-               if (frame_size > igb_rx_bufsz(ring))
+               if (frame_size > igb_rx_bufsz(ring)) {
+                       NL_SET_ERR_MSG_MOD(bpf->extack,
+                                          "The RX buffer size is too small for the frame size");
+                       netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n",
+                                   igb_rx_bufsz(ring), frame_size);
                        return -EINVAL;
+               }
        }
 
        old_prog = xchg(&adapter->xdp_prog, prog);
@@ -2869,7 +2874,7 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 {
        switch (xdp->command) {
        case XDP_SETUP_PROG:
-               return igb_xdp_setup(dev, xdp->prog);
+               return igb_xdp_setup(dev, xdp);
        default:
                return -EINVAL;
        }
@@ -2910,10 +2915,12 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
         */
        tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
        if (unlikely(!tx_ring))
-               return -ENXIO;
+               return IGB_XDP_CONSUMED;
 
        nq = txring_txq(tx_ring);
        __netif_tx_lock(nq, cpu);
+       /* Avoid transmit queue timeout since we share it with the slow path */
+       nq->trans_start = jiffies;
        ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
        __netif_tx_unlock(nq);
 
@@ -2946,6 +2953,9 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
        nq = txring_txq(tx_ring);
        __netif_tx_lock(nq, cpu);
 
+       /* Avoid transmit queue timeout since we share it with the slow path */
+       nq->trans_start = jiffies;
+
        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];
                int err;
@@ -3950,8 +3960,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
        /* set default work limits */
        adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
 
-       adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
-                                 VLAN_HLEN;
+       adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD;
        adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
        spin_lock_init(&adapter->nfc_lock);
@@ -6491,7 +6500,7 @@ static void igb_get_stats64(struct net_device *netdev,
 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
-       int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+       int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
 
        if (adapter->xdp_prog) {
                int i;
@@ -6500,7 +6509,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
                        struct igb_ring *ring = adapter->rx_ring[i];
 
                        if (max_frame > igb_rx_bufsz(ring)) {
-                               netdev_warn(adapter->netdev, "Requested MTU size is not supported with XDP\n");
+                               netdev_warn(adapter->netdev,
+                                           "Requested MTU size is not supported with XDP. Max frame size is %d\n",
+                                           max_frame);
                                return -EINVAL;
                        }
                }
@@ -8351,6 +8362,7 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
                                SKB_DATA_ALIGN(xdp->data_end -
                                               xdp->data_hard_start);
 #endif
+       unsigned int metasize = xdp->data - xdp->data_meta;
        struct sk_buff *skb;
 
        /* prefetch first cache line of first page */
@@ -8365,6 +8377,9 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
        skb_reserve(skb, xdp->data - xdp->data_hard_start);
        __skb_put(skb, xdp->data_end - xdp->data);
 
+       if (metasize)
+               skb_metadata_set(skb, metasize);
+
        /* pull timestamp out of packet data */
        if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
                igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
@@ -8771,7 +8786,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
        rx_ring->skb = skb;
 
        if (xdp_xmit & IGB_XDP_REDIR)
-               xdp_do_flush_map();
+               xdp_do_flush();
 
        if (xdp_xmit & IGB_XDP_TX) {
                struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
index 45ae33e..f3f449f 100644 (file)
@@ -1945,7 +1945,8 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
        return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
-static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
+static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
+                                   int rx_buffer_pgcnt)
 {
        unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
        struct page *page = rx_buffer->page;
@@ -1956,7 +1957,7 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
 
 #if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
-       if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+       if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
                return false;
 #else
        /* The last offset is a bit aggressive in that we assume the
@@ -2021,11 +2022,18 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
                                                   union ixgbe_adv_rx_desc *rx_desc,
                                                   struct sk_buff **skb,
-                                                  const unsigned int size)
+                                                  const unsigned int size,
+                                                  int *rx_buffer_pgcnt)
 {
        struct ixgbe_rx_buffer *rx_buffer;
 
        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+       *rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+               page_count(rx_buffer->page);
+#else
+               0;
+#endif
        prefetchw(rx_buffer->page);
        *skb = rx_buffer->skb;
 
@@ -2055,9 +2063,10 @@ skip_sync:
 
 static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
                                struct ixgbe_rx_buffer *rx_buffer,
-                               struct sk_buff *skb)
+                               struct sk_buff *skb,
+                               int rx_buffer_pgcnt)
 {
-       if (ixgbe_can_reuse_rx_page(rx_buffer)) {
+       if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
                /* hand second half of page back to the ring */
                ixgbe_reuse_rx_page(rx_ring, rx_buffer);
        } else {
@@ -2303,6 +2312,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                union ixgbe_adv_rx_desc *rx_desc;
                struct ixgbe_rx_buffer *rx_buffer;
                struct sk_buff *skb;
+               int rx_buffer_pgcnt;
                unsigned int size;
 
                /* return some buffers to hardware, one at a time is too slow */
@@ -2322,7 +2332,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                 */
                dma_rmb();
 
-               rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+               rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
 
                /* retrieve a buffer from the ring */
                if (!skb) {
@@ -2367,7 +2377,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                        break;
                }
 
-               ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
+               ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
                cleaned_count++;
 
                /* place incomplete frames back on ring for completion */
index 0f20e07..da4b286 100644 (file)
@@ -318,8 +318,10 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
                goto err_port_init;
        }
 
-       if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX)
+       if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX) {
+               err = -EINVAL;
                goto err_port_init;
+       }
 
        /* firmware requires that port's MAC address consist of the first
         * 5 bytes of the base MAC address
index 106513f..6f29031 100644 (file)
@@ -1378,8 +1378,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev, unsigned int txqueue)
                tx_ring->cons, tx_ring->prod);
 
        priv->port_stats.tx_timeout++;
-       en_dbg(DRV, priv, "Scheduling watchdog\n");
-       queue_work(mdev->workqueue, &priv->watchdog_task);
+       if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) {
+               en_dbg(DRV, priv, "Scheduling port restart\n");
+               queue_work(mdev->workqueue, &priv->restart_task);
+       }
 }
 
 
@@ -1733,6 +1735,7 @@ int mlx4_en_start_port(struct net_device *dev)
                                mlx4_en_deactivate_cq(priv, cq);
                                goto tx_err;
                        }
+                       clear_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &tx_ring->state);
                        if (t != TX_XDP) {
                                tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
                                tx_ring->recycle_ring = NULL;
@@ -1829,6 +1832,7 @@ int mlx4_en_start_port(struct net_device *dev)
                local_bh_enable();
        }
 
+       clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state);
        netif_tx_start_all_queues(dev);
        netif_device_attach(dev);
 
@@ -1999,7 +2003,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 static void mlx4_en_restart(struct work_struct *work)
 {
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
-                                                watchdog_task);
+                                                restart_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct net_device *dev = priv->dev;
 
@@ -2377,7 +2381,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
        if (netif_running(dev)) {
                mutex_lock(&mdev->state_lock);
                if (!mdev->device_up) {
-                       /* NIC is probably restarting - let watchdog task reset
+                       /* NIC is probably restarting - let restart task reset
                         * the port */
                        en_dbg(DRV, priv, "Change MTU called with card down!?\n");
                } else {
@@ -2386,7 +2390,9 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
                        if (err) {
                                en_err(priv, "Failed restarting port:%d\n",
                                         priv->port);
-                               queue_work(mdev->workqueue, &priv->watchdog_task);
+                               if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING,
+                                                     &priv->state))
+                                       queue_work(mdev->workqueue, &priv->restart_task);
                        }
                }
                mutex_unlock(&mdev->state_lock);
@@ -2792,7 +2798,8 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
                if (err) {
                        en_err(priv, "Failed starting port %d for XDP change\n",
                               priv->port);
-                       queue_work(mdev->workqueue, &priv->watchdog_task);
+                       if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
+                               queue_work(mdev->workqueue, &priv->restart_task);
                }
        }
 
@@ -3165,7 +3172,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
        spin_lock_init(&priv->stats_lock);
        INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
-       INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
+       INIT_WORK(&priv->restart_task, mlx4_en_restart);
        INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
        INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
        INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
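
Several hunks in this mlx4 patch wrap queue_work() in test_and_set_bit() on the new MLX4_EN_STATE_FLAG_RESTARTING bit, so the restart task is queued at most once however many paths notice a problem, and mlx4_en_start_port() clears the bit once the port is back up. A small userspace sketch of that schedule-once idiom using C11 atomics in place of the kernel's bitops (the function names here are placeholders, not kernel APIs):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_flag restarting = ATOMIC_FLAG_INIT;

    /* Placeholder for queue_work(mdev->workqueue, &priv->restart_task). */
    static void queue_restart_work(void)
    {
        printf("restart work queued\n");
    }

    /* Returns true if this caller actually scheduled the restart. */
    static bool schedule_restart_once(void)
    {
        /* test-and-set: only the first caller sees the flag clear */
        if (atomic_flag_test_and_set(&restarting))
            return false;
        queue_restart_work();
        return true;
    }

    /* The work handler clears the flag once recovery is done, mirroring
     * the clear_bit() added in mlx4_en_start_port() above.
     */
    static void restart_work_done(void)
    {
        atomic_flag_clear(&restarting);
    }

    int main(void)
    {
        printf("first:  %d\n", schedule_restart_once());  /* 1, queues work */
        printf("second: %d\n", schedule_restart_once());  /* 0, skipped */
        restart_work_done();
        printf("third:  %d\n", schedule_restart_once());  /* 1 again */
        return 0;
    }
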
index 3ddb726..59b097c 100644 (file)
@@ -392,6 +392,35 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
        return cnt;
 }
 
+static void mlx4_en_handle_err_cqe(struct mlx4_en_priv *priv, struct mlx4_err_cqe *err_cqe,
+                                  u16 cqe_index, struct mlx4_en_tx_ring *ring)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_tx_info *tx_info;
+       struct mlx4_en_tx_desc *tx_desc;
+       u16 wqe_index;
+       int desc_size;
+
+       en_err(priv, "CQE error - cqn 0x%x, ci 0x%x, vendor syndrome: 0x%x syndrome: 0x%x\n",
+              ring->sp_cqn, cqe_index, err_cqe->vendor_err_syndrome, err_cqe->syndrome);
+       print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, sizeof(*err_cqe),
+                      false);
+
+       wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask;
+       tx_info = &ring->tx_info[wqe_index];
+       desc_size = tx_info->nr_txbb << LOG_TXBB_SIZE;
+       en_err(priv, "Related WQE - qpn 0x%x, wqe index 0x%x, wqe size 0x%x\n", ring->qpn,
+              wqe_index, desc_size);
+       tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE);
+       print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false);
+
+       if (test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
+               return;
+
+       en_err(priv, "Scheduling port restart\n");
+       queue_work(mdev->workqueue, &priv->restart_task);
+}
+
 int mlx4_en_process_tx_cq(struct net_device *dev,
                          struct mlx4_en_cq *cq, int napi_budget)
 {
@@ -438,13 +467,10 @@ int mlx4_en_process_tx_cq(struct net_device *dev,
                dma_rmb();
 
                if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
-                            MLX4_CQE_OPCODE_ERROR)) {
-                       struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;
-
-                       en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
-                              cqe_err->vendor_err_syndrome,
-                              cqe_err->syndrome);
-               }
+                            MLX4_CQE_OPCODE_ERROR))
+                       if (!test_and_set_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &ring->state))
+                               mlx4_en_handle_err_cqe(priv, (struct mlx4_err_cqe *)cqe, index,
+                                                      ring);
 
                /* Skip over last polled CQE */
                new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
index a46efe3..30378e4 100644 (file)
@@ -271,6 +271,10 @@ struct mlx4_en_page_cache {
        } buf[MLX4_EN_CACHE_SIZE];
 };
 
+enum {
+       MLX4_EN_TX_RING_STATE_RECOVERING,
+};
+
 struct mlx4_en_priv;
 
 struct mlx4_en_tx_ring {
@@ -317,6 +321,7 @@ struct mlx4_en_tx_ring {
         * Only queue_stopped might be used if BQL is not properly working.
         */
        unsigned long           queue_stopped;
+       unsigned long           state;
        struct mlx4_hwq_resources sp_wqres;
        struct mlx4_qp          sp_qp;
        struct mlx4_qp_context  sp_context;
@@ -530,6 +535,10 @@ struct mlx4_en_stats_bitmap {
        struct mutex mutex; /* for mutual access to stats bitmap */
 };
 
+enum {
+       MLX4_EN_STATE_FLAG_RESTARTING,
+};
+
 struct mlx4_en_priv {
        struct mlx4_en_dev *mdev;
        struct mlx4_en_port_profile *prof;
@@ -595,7 +604,7 @@ struct mlx4_en_priv {
        struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
        struct mlx4_qp drop_qp;
        struct work_struct rx_mode_task;
-       struct work_struct watchdog_task;
+       struct work_struct restart_task;
        struct work_struct linkstate_task;
        struct delayed_work stats_task;
        struct delayed_work service_task;
@@ -641,6 +650,7 @@ struct mlx4_en_priv {
        u32 pflags;
        u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
        u8 rss_hash_fn;
+       unsigned long state;
 };
 
 enum mlx4_en_wol {
index 99f1ec3..3e371d2 100644 (file)
@@ -198,6 +198,7 @@ config MLX5_EN_TLS
 config MLX5_SW_STEERING
        bool "Mellanox Technologies software-managed steering"
        depends on MLX5_CORE_EN && MLX5_ESWITCH
+       select CRC32
        default y
        help
        Build support for software-managed steering in the NIC.
index 31f9a82..d0f6dfe 100644 (file)
@@ -47,6 +47,7 @@ config LAN743X
        depends on PCI
        select PHYLIB
        select CRC16
+       select CRC32
        help
          Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip
 
index 70bf8c6..a53bd36 100644 (file)
@@ -1489,10 +1489,11 @@ int ocelot_init(struct ocelot *ocelot)
                     SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING);
 
        /* Setup flooding PGIDs */
-       ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
-                        ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
-                        ANA_FLOODING_FLD_UNICAST(PGID_UC),
-                        ANA_FLOODING, 0);
+       for (i = 0; i < ocelot->num_flooding_pgids; i++)
+               ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
+                                ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
+                                ANA_FLOODING_FLD_UNICAST(PGID_UC),
+                                ANA_FLOODING, i);
        ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
                     ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) |
                     ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) |
index dc00772..1e77294 100644 (file)
@@ -1254,6 +1254,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
        }
 
        ocelot->num_phys_ports = of_get_child_count(ports);
+       ocelot->num_flooding_pgids = 1;
 
        ocelot->vcap = vsc7514_vcap_props;
        ocelot->inj_prefix = OCELOT_TAG_PREFIX_NONE;
index d8b99d6..b82758d 100644 (file)
@@ -22,6 +22,7 @@ config NFP
        depends on VXLAN || VXLAN=n
        depends on TLS && TLS_DEVICE || TLS_DEVICE=n
        select NET_DEVLINK
+       select CRC32
        help
          This driver supports the Netronome(R) NFP4000/NFP6000 based
          cards working as an advanced Ethernet NIC.  It works with both
index b150da4..4372268 100644 (file)
@@ -3562,9 +3562,6 @@ static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
        struct nfp_net_dp *dp;
        int err;
 
-       if (!xdp_attachment_flags_ok(&nn->xdp, bpf))
-               return -EBUSY;
-
        if (!prog == !nn->dp.xdp_prog) {
                WRITE_ONCE(nn->dp.xdp_prog, prog);
                xdp_attachment_setup(&nn->xdp, bpf);
@@ -3593,9 +3590,6 @@ static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf)
 {
        int err;
 
-       if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf))
-               return -EBUSY;
-
        err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
        if (err)
                return err;
index ee83a71..c84997d 100644 (file)
@@ -3,6 +3,7 @@ config LPC_ENET
        tristate "NXP ethernet MAC on LPC devices"
        depends on ARCH_LPC32XX || COMPILE_TEST
        select PHYLIB
+       select CRC32
        help
          Say Y or M here if you want to use the NXP ethernet MAC included on
          some NXP LPC devices. You can safely enable this option for LPC32xx
index 99e1290..2318811 100644 (file)
@@ -19,6 +19,7 @@ if NET_VENDOR_ROCKER
 config ROCKER
        tristate "Rocker switch driver (EXPERIMENTAL)"
        depends on PCI && NET_SWITCHDEV && BRIDGE
+       select CRC32
        help
          This driver supports the Rocker switch device.
 
index efef547..223f69d 100644 (file)
@@ -246,13 +246,7 @@ static int imx_dwmac_probe(struct platform_device *pdev)
                goto err_parse_dt;
        }
 
-       ret = dma_set_mask_and_coherent(&pdev->dev,
-                                       DMA_BIT_MASK(dwmac->ops->addr_width));
-       if (ret) {
-               dev_err(&pdev->dev, "DMA mask set failed\n");
-               goto err_dma_mask;
-       }
-
+       plat_dat->addr64 = dwmac->ops->addr_width;
        plat_dat->init = imx_dwmac_init;
        plat_dat->exit = imx_dwmac_exit;
        plat_dat->fix_mac_speed = imx_dwmac_fix_speed;
@@ -272,7 +266,6 @@ static int imx_dwmac_probe(struct platform_device *pdev)
 err_dwmac_init:
 err_drv_probe:
        imx_dwmac_exit(pdev, plat_dat->bsp_priv);
-err_dma_mask:
 err_parse_dt:
 err_match_data:
        stmmac_remove_config_dt(pdev, plat_dat);
index 5afcf05..6d6bd77 100644 (file)
@@ -30,7 +30,6 @@
 #define PRG_ETH0_EXT_RMII_MODE         4
 
 /* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */
-#define PRG_ETH0_CLK_M250_SEL_SHIFT    4
 #define PRG_ETH0_CLK_M250_SEL_MASK     GENMASK(4, 4)
 
 /* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where 8ns are exactly one
@@ -155,8 +154,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
                return -ENOMEM;
 
        clk_configs->m250_mux.reg = dwmac->regs + PRG_ETH0;
-       clk_configs->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT;
-       clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK;
+       clk_configs->m250_mux.shift = __ffs(PRG_ETH0_CLK_M250_SEL_MASK);
+       clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK >>
+                                    clk_configs->m250_mux.shift;
        clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parents,
                                         ARRAY_SIZE(mux_parents), &clk_mux_ops,
                                         &clk_configs->m250_mux.hw);
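
The meson8b hunk above derives the mux shift and field mask from the single GENMASK() definition instead of keeping a separate *_SHIFT macro: __ffs() gives the position of the lowest set bit, and shifting the register mask down by it yields the field mask the common clk_mux code expects. A standalone check with userspace stand-ins for GENMASK() and __ffs(); the 3-bit GENMASK(6, 4) case is an extra made-up example, not the PRG_ETH0 layout:

    #include <stdio.h>

    /* Userspace stand-ins: GENMASK() as in the kernel, __ffs() via a
     * GCC/Clang builtin.
     */
    #define GENMASK(h, l) \
        (((~0UL) << (l)) & (~0UL >> (sizeof(long) * 8 - 1 - (h))))
    #define __ffs(x)    ((unsigned long)__builtin_ctzl(x))

    int main(void)
    {
        unsigned long sel_mask  = GENMASK(4, 4);  /* single-bit field */
        unsigned long wide_mask = GENMASK(6, 4);  /* hypothetical 3-bit field */

        printf("GENMASK(4,4)=%#lx shift=%lu field_mask=%#lx\n",
               sel_mask, __ffs(sel_mask), sel_mask >> __ffs(sel_mask));
        printf("GENMASK(6,4)=%#lx shift=%lu field_mask=%#lx\n",
               wide_mask, __ffs(wide_mask), wide_mask >> __ffs(wide_mask));
        return 0;
    }
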
index 6e30d7e..0b4ee2d 100644 (file)
@@ -22,7 +22,7 @@ int dwmac4_dma_reset(void __iomem *ioaddr)
 
        return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
                                 !(value & DMA_BUS_MODE_SFT_RESET),
-                                10000, 100000);
+                                10000, 1000000);
 }
 
 void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
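
For the dwmac4 change above: readl_poll_timeout() takes its last two arguments in microseconds (polling interval, then overall timeout) and returns 0 once the condition holds or -ETIMEDOUT otherwise, so the hunk keeps the roughly 10 ms polling interval but allows the DMA soft-reset bit up to one second to clear instead of 100 ms. A rough userspace approximation of that kind of polling loop, with the register read and the sleeping both simulated:

    #include <errno.h>
    #include <stdio.h>

    /* Simulated register: bit 0 (soft-reset pending) clears after a few
     * reads.
     */
    static unsigned int simulated_readl(void)
    {
        static int reads;

        return (++reads < 5) ? 0x1 : 0x0;
    }

    /* Poll until bit 0 clears or timeout_us elapses, "sleeping" sleep_us
     * between reads; same overall shape as readl_poll_timeout().
     */
    static int poll_timeout(unsigned long sleep_us, unsigned long timeout_us,
                            unsigned int *out)
    {
        unsigned long waited = 0;

        while (waited <= timeout_us) {
            *out = simulated_readl();
            if (!(*out & 0x1))
                return 0;
            waited += sleep_us;     /* stand-in for usleep_range() */
        }
        return -ETIMEDOUT;
    }

    int main(void)
    {
        unsigned int value;
        int ret = poll_timeout(10000, 1000000, &value);

        printf("ret=%d value=%#x\n", ret, value);
        return 0;
    }
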
index ba45fe2..c33db79 100644 (file)
@@ -1534,6 +1534,19 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
 }
 
 /**
+ * stmmac_free_tx_skbufs - free TX skb buffers
+ * @priv: private structure
+ */
+static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
+{
+       u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
+       u32 queue;
+
+       for (queue = 0; queue < tx_queue_cnt; queue++)
+               dma_free_tx_skbufs(priv, queue);
+}
+
+/**
  * free_dma_rx_desc_resources - free RX dma desc resources
  * @priv: private structure
  */
@@ -2895,9 +2908,6 @@ static int stmmac_release(struct net_device *dev)
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 chan;
 
-       if (priv->eee_enabled)
-               del_timer_sync(&priv->eee_ctrl_timer);
-
        if (device_may_wakeup(priv->device))
                phylink_speed_down(priv->phylink, false);
        /* Stop and disconnect the PHY */
@@ -2916,6 +2926,11 @@ static int stmmac_release(struct net_device *dev)
        if (priv->lpi_irq > 0)
                free_irq(priv->lpi_irq, dev);
 
+       if (priv->eee_enabled) {
+               priv->tx_path_in_lpi_mode = false;
+               del_timer_sync(&priv->eee_ctrl_timer);
+       }
+
        /* Stop TX/RX DMA and clear the descriptors */
        stmmac_stop_all_dma(priv);
 
@@ -4930,6 +4945,14 @@ int stmmac_dvr_probe(struct device *device,
                dev_info(priv->device, "SPH feature enabled\n");
        }
 
+       /* The current IP register MAC_HW_Feature1[ADDR64] only defines
+        * 32/40/64 bit widths, but some SoCs support other widths: the
+        * i.MX8MP, for example, supports 34 bits, which is reported as a
+        * 40 bit width in MAC_HW_Feature1[ADDR64]. So overwrite
+        * dma_cap.addr64 according to the real HW design.
+        */
+       if (priv->plat->addr64)
+               priv->dma_cap.addr64 = priv->plat->addr64;
+
        if (priv->dma_cap.addr64) {
                ret = dma_set_mask_and_coherent(device,
                                DMA_BIT_MASK(priv->dma_cap.addr64));
@@ -5142,6 +5165,11 @@ int stmmac_suspend(struct device *dev)
        for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
                del_timer_sync(&priv->tx_queue[chan].txtimer);
 
+       if (priv->eee_enabled) {
+               priv->tx_path_in_lpi_mode = false;
+               del_timer_sync(&priv->eee_ctrl_timer);
+       }
+
        /* Stop TX/RX DMA */
        stmmac_stop_all_dma(priv);
 
@@ -5247,11 +5275,20 @@ int stmmac_resume(struct device *dev)
                        return ret;
        }
 
+       if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+               rtnl_lock();
+               phylink_start(priv->phylink);
+               /* We may have called phylink_speed_down before */
+               phylink_speed_up(priv->phylink);
+               rtnl_unlock();
+       }
+
        rtnl_lock();
        mutex_lock(&priv->lock);
 
        stmmac_reset_queues_param(priv);
 
+       stmmac_free_tx_skbufs(priv);
        stmmac_clear_descriptors(priv);
 
        stmmac_hw_setup(ndev, false);
@@ -5265,14 +5302,6 @@ int stmmac_resume(struct device *dev)
        mutex_unlock(&priv->lock);
        rtnl_unlock();
 
-       if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
-               rtnl_lock();
-               phylink_start(priv->phylink);
-               /* We may have called phylink_speed_down before */
-               phylink_speed_up(priv->phylink);
-               rtnl_unlock();
-       }
-
        phylink_mac_change(priv->phylink, true);
 
        netif_device_attach(ndev);
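
Tying this back to the addr64 override added earlier in this file: plat->addr64 ends up in DMA_BIT_MASK(), and DMA_BIT_MASK(n) is ((1ULL << n) - 1) for n < 64, so a SoC whose fabric really carries 34 address bits (the i.MX8MP case in the new comment) gets a 0x3ffffffff mask rather than the 40-bit mask the capability register would suggest. A quick check of those two values; the bit widths are just the ones mentioned in the comment:

    #include <stdio.h>

    /* Same definition as the kernel's DMA_BIT_MASK() for n < 64. */
    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    int main(void)
    {
        printf("34-bit mask: %#llx\n", DMA_BIT_MASK(34));  /* 0x3ffffffff */
        printf("40-bit mask: %#llx\n", DMA_BIT_MASK(40));  /* 0xffffffffff */
        return 0;
    }
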
index 31c5e36..424e644 100644 (file)
@@ -1265,9 +1265,6 @@ static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
        if (!priv->xdpi.prog && !prog)
                return 0;
 
-       if (!xdp_attachment_flags_ok(&priv->xdpi, bpf))
-               return -EBUSY;
-
        WRITE_ONCE(priv->xdp_prog, prog);
 
        xdp_attachment_setup(&priv->xdpi, bpf);
index 60c199f..0301853 100644 (file)
@@ -1351,7 +1351,6 @@ static int temac_probe(struct platform_device *pdev)
        struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
        struct temac_local *lp;
        struct net_device *ndev;
-       struct resource *res;
        const void *addr;
        __be32 *p;
        bool little_endian;
@@ -1500,13 +1499,11 @@ static int temac_probe(struct platform_device *pdev)
                of_node_put(dma_np);
        } else if (pdata) {
                /* 2nd memory resource specifies DMA registers */
-               res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-               lp->sdma_regs = devm_ioremap(&pdev->dev, res->start,
-                                                    resource_size(res));
-               if (!lp->sdma_regs) {
+               lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1);
+               if (IS_ERR(lp->sdma_regs)) {
                        dev_err(&pdev->dev,
                                "could not map DMA registers\n");
-                       return -ENOMEM;
+                       return PTR_ERR(lp->sdma_regs);
                }
                if (pdata->dma_little_endian) {
                        lp->dma_in = temac_dma_in32_le;
index 8ae9ce2..1426bfc 100644 (file)
@@ -257,21 +257,11 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
                skb_dst_set(skb, &tun_dst->dst);
 
        /* Ignore packet loops (and multicast echo) */
-       if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
-               goto rx_error;
-
-       switch (skb_protocol(skb, true)) {
-       case htons(ETH_P_IP):
-               if (pskb_may_pull(skb, sizeof(struct iphdr)))
-                       goto rx_error;
-               break;
-       case htons(ETH_P_IPV6):
-               if (pskb_may_pull(skb, sizeof(struct ipv6hdr)))
-                       goto rx_error;
-               break;
-       default:
-               goto rx_error;
+       if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
+               geneve->dev->stats.rx_errors++;
+               goto drop;
        }
+
        oiph = skb_network_header(skb);
        skb_reset_network_header(skb);
 
@@ -308,8 +298,6 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
                dev_sw_netstats_rx_add(geneve->dev, len);
 
        return;
-rx_error:
-       geneve->dev->stats.rx_errors++;
 drop:
        /* Consume bad packet */
        kfree_skb(skb);
index e8599bb..6c3ed5b 100644 (file)
@@ -156,6 +156,9 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
        /* The allocator will give us a power-of-2 number of pages.  But we
         * can't guarantee that, so request it.  That way we won't waste any
         * memory that would be available beyond the required space.
+        *
+        * Note that gsi_trans_pool_exit_dma() assumes the total allocated
+        * size is exactly (count * size).
         */
        total_size = get_order(total_size) << PAGE_SHIFT;
 
@@ -175,7 +178,9 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
 
 void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
 {
-       dma_free_coherent(dev, pool->size, pool->base, pool->addr);
+       size_t total_size = pool->count * pool->size;
+
+       dma_free_coherent(dev, total_size, pool->base, pool->addr);
        memset(pool, 0, sizeof(*pool));
 }
 
index 2e90512..90aafb5 100644 (file)
@@ -63,15 +63,20 @@ static int
 nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
 {
        struct nsim_bpf_bound_prog *state;
+       int ret = 0;
 
        state = env->prog->aux->offload->dev_priv;
        if (state->nsim_dev->bpf_bind_verifier_delay && !insn_idx)
                msleep(state->nsim_dev->bpf_bind_verifier_delay);
 
-       if (insn_idx == env->prog->len - 1)
+       if (insn_idx == env->prog->len - 1) {
                pr_vlog(env, "Hello from netdevsim!\n");
 
-       return 0;
+               if (!state->nsim_dev->bpf_bind_verifier_accept)
+                       ret = -EOPNOTSUPP;
+       }
+
+       return ret;
 }
 
 static int nsim_bpf_finalize(struct bpf_verifier_env *env)
@@ -190,9 +195,6 @@ nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf,
 {
        int err;
 
-       if (!xdp_attachment_flags_ok(xdp, bpf))
-               return -EBUSY;
-
        if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) {
                NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS");
                return -EOPNOTSUPP;
@@ -598,6 +600,9 @@ int nsim_bpf_dev_init(struct nsim_dev *nsim_dev)
                            &nsim_dev->bpf_bind_accept);
        debugfs_create_u32("bpf_bind_verifier_delay", 0600, nsim_dev->ddir,
                           &nsim_dev->bpf_bind_verifier_delay);
+       nsim_dev->bpf_bind_verifier_accept = true;
+       debugfs_create_bool("bpf_bind_verifier_accept", 0600, nsim_dev->ddir,
+                           &nsim_dev->bpf_bind_verifier_accept);
        return 0;
 }
 
index 827fc80..c4e7ad2 100644 (file)
@@ -189,6 +189,7 @@ struct nsim_dev {
        struct dentry *take_snapshot;
        struct bpf_offload_dev *bpf_dev;
        bool bpf_bind_accept;
+       bool bpf_bind_verifier_accept;
        u32 bpf_bind_verifier_delay;
        struct dentry *ddir_bpf_bound_progs;
        u32 prog_id_gen;
index f2793ff..b9b7e00 100644 (file)
@@ -1315,11 +1315,17 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
        int orig_iif = skb->skb_iif;
        bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
        bool is_ndisc = ipv6_ndisc_frame(skb);
+       bool is_ll_src;
 
        /* loopback, multicast & non-ND link-local traffic; do not push through
-        * packet taps again. Reset pkt_type for upper layers to process skb
+        * packet taps again. Reset pkt_type for upper layers to process skb.
+        * For packets with lladdr src, however, skip so that the dst can be
+        * determined at input using the original ifindex in the case that daddr
+        * needs strict
         */
-       if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
+       is_ll_src = ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL;
+       if (skb->pkt_type == PACKET_LOOPBACK ||
+           (need_strict && !is_ndisc && !is_ll_src)) {
                skb->dev = vrf_dev;
                skb->skb_iif = vrf_dev->ifindex;
                IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
index d6b8495..9c65d56 100644 (file)
@@ -286,14 +286,76 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
 static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr)
 {
        /*
-        * The signal type is GPIO if the signal name has "GPI" as a prefix.
-        * strncmp (rather than strcmp) is used to implement the prefix
-        * requirement.
+        * We need to differentiate between GPIO and non-GPIO signals to
+        * implement the gpio_request_enable() interface. For better or worse
+        * the ASPEED pinctrl driver uses the expression names to determine
+        * whether an expression will mux a pin for GPIO.
         *
-        * expr->signal might look like "GPIOB1" in the GPIO case.
-        * expr->signal might look like "GPIT0" in the GPI case.
+        * Generally we have the following - A GPIO such as B1 has:
+        *
+        *    - expr->signal set to "GPIOB1"
+        *    - expr->function set to "GPIOB1"
+        *
+        * Using this fact we can determine whether the provided expression is
+        * a GPIO expression by testing the signal name for the string prefix
+        * "GPIO".
+        *
+        * However, some GPIOs are input-only, and the ASPEED datasheets name
+        * them differently. An input-only GPIO such as T0 has:
+        *
+        *    - expr->signal set to "GPIT0"
+        *    - expr->function set to "GPIT0"
+        *
+        * It's tempting to generalise the prefix test from "GPIO" to "GPI" to
+        * account for both GPIOs and GPIs, but in doing so we run aground on
+        * another feature:
+        *
+        * Some pins in the ASPEED BMC SoCs have a "pass-through" GPIO
+        * function where the input state of one pin is replicated as the
+        * output state of another (as if they were shorted together - a mux
+        * configuration that is typically enabled by hardware strapping).
+        * This feature allows the BMC to pass e.g. power button state through
+        * to the host while the BMC is yet to boot, but take control of the
+        * button state once the BMC has booted by muxing each pin as a
+        * separate, pin-specific GPIO.
+        *
+        * Conceptually this pass-through mode is a form of GPIO and is named
+        * as such in the datasheets, e.g. "GPID0". This naming similarity
+        * trips us up with the simple GPI-prefixed-signal-name scheme
+        * discussed above, as the pass-through configuration is not what we
+        * want when muxing a pin as GPIO for the GPIO subsystem.
+        *
+        * On e.g. the AST2400, a pass-through function "GPID0" is grouped on
+        * balls A18 and D16, where we have:
+        *
+        *    For ball A18:
+        *    - expr->signal set to "GPID0IN"
+        *    - expr->function set to "GPID0"
+        *
+        *    For ball D16:
+        *    - expr->signal set to "GPID0OUT"
+        *    - expr->function set to "GPID0"
+        *
+        * By contrast, the pin-specific GPIO expressions for the same pins are
+        * as follows:
+        *
+        *    For ball A18:
+        *    - expr->signal looks like "GPIOD0"
+        *    - expr->function looks like "GPIOD0"
+        *
+        *    For ball D16:
+        *    - expr->signal looks like "GPIOD1"
+        *    - expr->function looks like "GPIOD1"
+        *
+        * Testing both the signal _and_ function names gives us the means to
+        * differentiate the pass-through GPIO pinmux configuration from the
+        * pin-specific configuration that the GPIO subsystem is after: An
+        * expression is a pin-specific (non-pass-through) GPIO configuration
+        * if the signal prefix is "GPI" and the signal name matches the
+        * function name.
         */
-       return strncmp(expr->signal, "GPI", 3) == 0;
+       return !strncmp(expr->signal, "GPI", 3) &&
+                       !strcmp(expr->signal, expr->function);
 }
 
 static bool aspeed_gpio_in_exprs(const struct aspeed_sig_expr **exprs)
index f86739e..dba5875 100644 (file)
@@ -452,10 +452,11 @@ struct aspeed_sig_desc {
  * evaluation of the descriptors.
  *
  * @signal: The signal name for the priority level on the pin. If the signal
- *          type is GPIO, then the signal name must begin with the string
- *          "GPIO", e.g. GPIOA0, GPIOT4 etc.
+ *          type is GPIO, then the signal name must begin with the
+ *          prefix "GPI", e.g. GPIOA0, GPIT0 etc.
  * @function: The name of the function the signal participates in for the
- *            associated expression
+ *            associated expression. For pin-specific GPIO, the function
+ *            name must match the signal name.
  * @ndescs: The number of signal descriptors in the expression
  * @descs: Pointer to an array of signal descriptors that comprise the
  *         function expression
index d49aab3..394a421 100644 (file)
@@ -1049,7 +1049,6 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
                        break;
                case PIN_CONFIG_INPUT_DEBOUNCE:
                        debounce = readl(db_reg);
-                       debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
 
                        if (arg)
                                conf |= BYT_DEBOUNCE_EN;
@@ -1058,24 +1057,31 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
 
                        switch (arg) {
                        case 375:
+                               debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
                                debounce |= BYT_DEBOUNCE_PULSE_375US;
                                break;
                        case 750:
+                               debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
                                debounce |= BYT_DEBOUNCE_PULSE_750US;
                                break;
                        case 1500:
+                               debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
                                debounce |= BYT_DEBOUNCE_PULSE_1500US;
                                break;
                        case 3000:
+                               debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
                                debounce |= BYT_DEBOUNCE_PULSE_3MS;
                                break;
                        case 6000:
+                               debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
                                debounce |= BYT_DEBOUNCE_PULSE_6MS;
                                break;
                        case 12000:
+                               debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
                                debounce |= BYT_DEBOUNCE_PULSE_12MS;
                                break;
                        case 24000:
+                               debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
                                debounce |= BYT_DEBOUNCE_PULSE_24MS;
                                break;
                        default:
index 1c10ab1..b6ef191 100644 (file)
@@ -442,8 +442,8 @@ static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
        value |= PADCFG0_PMODE_GPIO;
 
        /* Disable input and output buffers */
-       value &= ~PADCFG0_GPIORXDIS;
-       value &= ~PADCFG0_GPIOTXDIS;
+       value |= PADCFG0_GPIORXDIS;
+       value |= PADCFG0_GPIOTXDIS;
 
        /* Disable SCI/SMI/NMI generation */
        value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
index 9bd0e8e..ec435b7 100644 (file)
@@ -16,7 +16,7 @@
 
 #define JSL_PAD_OWN    0x020
 #define JSL_PADCFGLOCK 0x080
-#define JSL_HOSTSW_OWN 0x0b0
+#define JSL_HOSTSW_OWN 0x0c0
 #define JSL_GPI_IS     0x100
 #define JSL_GPI_IE     0x120
 
@@ -65,252 +65,263 @@ static const struct pinctrl_pin_desc jsl_pins[] = {
        PINCTRL_PIN(17, "EMMC_CLK"),
        PINCTRL_PIN(18, "EMMC_RESETB"),
        PINCTRL_PIN(19, "A4WP_PRESENT"),
+       /* SPI */
+       PINCTRL_PIN(20, "SPI0_IO_2"),
+       PINCTRL_PIN(21, "SPI0_IO_3"),
+       PINCTRL_PIN(22, "SPI0_MOSI_IO_0"),
+       PINCTRL_PIN(23, "SPI0_MISO_IO_1"),
+       PINCTRL_PIN(24, "SPI0_TPM_CSB"),
+       PINCTRL_PIN(25, "SPI0_FLASH_0_CSB"),
+       PINCTRL_PIN(26, "SPI0_FLASH_1_CSB"),
+       PINCTRL_PIN(27, "SPI0_CLK"),
+       PINCTRL_PIN(28, "SPI0_CLK_LOOPBK"),
        /* GPP_B */
-       PINCTRL_PIN(20, "CORE_VID_0"),
-       PINCTRL_PIN(21, "CORE_VID_1"),
-       PINCTRL_PIN(22, "VRALERTB"),
-       PINCTRL_PIN(23, "CPU_GP_2"),
-       PINCTRL_PIN(24, "CPU_GP_3"),
-       PINCTRL_PIN(25, "SRCCLKREQB_0"),
-       PINCTRL_PIN(26, "SRCCLKREQB_1"),
-       PINCTRL_PIN(27, "SRCCLKREQB_2"),
-       PINCTRL_PIN(28, "SRCCLKREQB_3"),
-       PINCTRL_PIN(29, "SRCCLKREQB_4"),
-       PINCTRL_PIN(30, "SRCCLKREQB_5"),
-       PINCTRL_PIN(31, "PMCALERTB"),
-       PINCTRL_PIN(32, "SLP_S0B"),
-       PINCTRL_PIN(33, "PLTRSTB"),
-       PINCTRL_PIN(34, "SPKR"),
-       PINCTRL_PIN(35, "GSPI0_CS0B"),
-       PINCTRL_PIN(36, "GSPI0_CLK"),
-       PINCTRL_PIN(37, "GSPI0_MISO"),
-       PINCTRL_PIN(38, "GSPI0_MOSI"),
-       PINCTRL_PIN(39, "GSPI1_CS0B"),
-       PINCTRL_PIN(40, "GSPI1_CLK"),
-       PINCTRL_PIN(41, "GSPI1_MISO"),
-       PINCTRL_PIN(42, "GSPI1_MOSI"),
-       PINCTRL_PIN(43, "DDSP_HPD_A"),
-       PINCTRL_PIN(44, "GSPI0_CLK_LOOPBK"),
-       PINCTRL_PIN(45, "GSPI1_CLK_LOOPBK"),
+       PINCTRL_PIN(29, "CORE_VID_0"),
+       PINCTRL_PIN(30, "CORE_VID_1"),
+       PINCTRL_PIN(31, "VRALERTB"),
+       PINCTRL_PIN(32, "CPU_GP_2"),
+       PINCTRL_PIN(33, "CPU_GP_3"),
+       PINCTRL_PIN(34, "SRCCLKREQB_0"),
+       PINCTRL_PIN(35, "SRCCLKREQB_1"),
+       PINCTRL_PIN(36, "SRCCLKREQB_2"),
+       PINCTRL_PIN(37, "SRCCLKREQB_3"),
+       PINCTRL_PIN(38, "SRCCLKREQB_4"),
+       PINCTRL_PIN(39, "SRCCLKREQB_5"),
+       PINCTRL_PIN(40, "PMCALERTB"),
+       PINCTRL_PIN(41, "SLP_S0B"),
+       PINCTRL_PIN(42, "PLTRSTB"),
+       PINCTRL_PIN(43, "SPKR"),
+       PINCTRL_PIN(44, "GSPI0_CS0B"),
+       PINCTRL_PIN(45, "GSPI0_CLK"),
+       PINCTRL_PIN(46, "GSPI0_MISO"),
+       PINCTRL_PIN(47, "GSPI0_MOSI"),
+       PINCTRL_PIN(48, "GSPI1_CS0B"),
+       PINCTRL_PIN(49, "GSPI1_CLK"),
+       PINCTRL_PIN(50, "GSPI1_MISO"),
+       PINCTRL_PIN(51, "GSPI1_MOSI"),
+       PINCTRL_PIN(52, "DDSP_HPD_A"),
+       PINCTRL_PIN(53, "GSPI0_CLK_LOOPBK"),
+       PINCTRL_PIN(54, "GSPI1_CLK_LOOPBK"),
        /* GPP_A */
-       PINCTRL_PIN(46, "ESPI_IO_0"),
-       PINCTRL_PIN(47, "ESPI_IO_1"),
-       PINCTRL_PIN(48, "ESPI_IO_2"),
-       PINCTRL_PIN(49, "ESPI_IO_3"),
-       PINCTRL_PIN(50, "ESPI_CSB"),
-       PINCTRL_PIN(51, "ESPI_CLK"),
-       PINCTRL_PIN(52, "ESPI_RESETB"),
-       PINCTRL_PIN(53, "SMBCLK"),
-       PINCTRL_PIN(54, "SMBDATA"),
-       PINCTRL_PIN(55, "SMBALERTB"),
-       PINCTRL_PIN(56, "CPU_GP_0"),
-       PINCTRL_PIN(57, "CPU_GP_1"),
-       PINCTRL_PIN(58, "USB2_OCB_1"),
-       PINCTRL_PIN(59, "USB2_OCB_2"),
-       PINCTRL_PIN(60, "USB2_OCB_3"),
-       PINCTRL_PIN(61, "DDSP_HPD_A_TIME_SYNC_0"),
-       PINCTRL_PIN(62, "DDSP_HPD_B"),
-       PINCTRL_PIN(63, "DDSP_HPD_C"),
-       PINCTRL_PIN(64, "USB2_OCB_0"),
-       PINCTRL_PIN(65, "PCHHOTB"),
-       PINCTRL_PIN(66, "ESPI_CLK_LOOPBK"),
+       PINCTRL_PIN(55, "ESPI_IO_0"),
+       PINCTRL_PIN(56, "ESPI_IO_1"),
+       PINCTRL_PIN(57, "ESPI_IO_2"),
+       PINCTRL_PIN(58, "ESPI_IO_3"),
+       PINCTRL_PIN(59, "ESPI_CSB"),
+       PINCTRL_PIN(60, "ESPI_CLK"),
+       PINCTRL_PIN(61, "ESPI_RESETB"),
+       PINCTRL_PIN(62, "SMBCLK"),
+       PINCTRL_PIN(63, "SMBDATA"),
+       PINCTRL_PIN(64, "SMBALERTB"),
+       PINCTRL_PIN(65, "CPU_GP_0"),
+       PINCTRL_PIN(66, "CPU_GP_1"),
+       PINCTRL_PIN(67, "USB2_OCB_1"),
+       PINCTRL_PIN(68, "USB2_OCB_2"),
+       PINCTRL_PIN(69, "USB2_OCB_3"),
+       PINCTRL_PIN(70, "DDSP_HPD_A_TIME_SYNC_0"),
+       PINCTRL_PIN(71, "DDSP_HPD_B"),
+       PINCTRL_PIN(72, "DDSP_HPD_C"),
+       PINCTRL_PIN(73, "USB2_OCB_0"),
+       PINCTRL_PIN(74, "PCHHOTB"),
+       PINCTRL_PIN(75, "ESPI_CLK_LOOPBK"),
        /* GPP_S */
-       PINCTRL_PIN(67, "SNDW1_CLK"),
-       PINCTRL_PIN(68, "SNDW1_DATA"),
-       PINCTRL_PIN(69, "SNDW2_CLK"),
-       PINCTRL_PIN(70, "SNDW2_DATA"),
-       PINCTRL_PIN(71, "SNDW1_CLK"),
-       PINCTRL_PIN(72, "SNDW1_DATA"),
-       PINCTRL_PIN(73, "SNDW4_CLK_DMIC_CLK_0"),
-       PINCTRL_PIN(74, "SNDW4_DATA_DMIC_DATA_0"),
+       PINCTRL_PIN(76, "SNDW1_CLK"),
+       PINCTRL_PIN(77, "SNDW1_DATA"),
+       PINCTRL_PIN(78, "SNDW2_CLK"),
+       PINCTRL_PIN(79, "SNDW2_DATA"),
+       PINCTRL_PIN(80, "SNDW1_CLK"),
+       PINCTRL_PIN(81, "SNDW1_DATA"),
+       PINCTRL_PIN(82, "SNDW4_CLK_DMIC_CLK_0"),
+       PINCTRL_PIN(83, "SNDW4_DATA_DMIC_DATA_0"),
        /* GPP_R */
-       PINCTRL_PIN(75, "HDA_BCLK"),
-       PINCTRL_PIN(76, "HDA_SYNC"),
-       PINCTRL_PIN(77, "HDA_SDO"),
-       PINCTRL_PIN(78, "HDA_SDI_0"),
-       PINCTRL_PIN(79, "HDA_RSTB"),
-       PINCTRL_PIN(80, "HDA_SDI_1"),
-       PINCTRL_PIN(81, "I2S1_SFRM"),
-       PINCTRL_PIN(82, "I2S1_TXD"),
+       PINCTRL_PIN(84, "HDA_BCLK"),
+       PINCTRL_PIN(85, "HDA_SYNC"),
+       PINCTRL_PIN(86, "HDA_SDO"),
+       PINCTRL_PIN(87, "HDA_SDI_0"),
+       PINCTRL_PIN(88, "HDA_RSTB"),
+       PINCTRL_PIN(89, "HDA_SDI_1"),
+       PINCTRL_PIN(90, "I2S1_SFRM"),
+       PINCTRL_PIN(91, "I2S1_TXD"),
        /* GPP_H */
-       PINCTRL_PIN(83, "GPPC_H_0"),
-       PINCTRL_PIN(84, "SD_PWR_EN_B"),
-       PINCTRL_PIN(85, "MODEM_CLKREQ"),
-       PINCTRL_PIN(86, "SX_EXIT_HOLDOFFB"),
-       PINCTRL_PIN(87, "I2C2_SDA"),
-       PINCTRL_PIN(88, "I2C2_SCL"),
-       PINCTRL_PIN(89, "I2C3_SDA"),
-       PINCTRL_PIN(90, "I2C3_SCL"),
-       PINCTRL_PIN(91, "I2C4_SDA"),
-       PINCTRL_PIN(92, "I2C4_SCL"),
-       PINCTRL_PIN(93, "CPU_VCCIO_PWR_GATEB"),
-       PINCTRL_PIN(94, "I2S2_SCLK"),
-       PINCTRL_PIN(95, "I2S2_SFRM"),
-       PINCTRL_PIN(96, "I2S2_TXD"),
-       PINCTRL_PIN(97, "I2S2_RXD"),
-       PINCTRL_PIN(98, "I2S1_SCLK"),
-       PINCTRL_PIN(99, "GPPC_H_16"),
-       PINCTRL_PIN(100, "GPPC_H_17"),
-       PINCTRL_PIN(101, "GPPC_H_18"),
-       PINCTRL_PIN(102, "GPPC_H_19"),
-       PINCTRL_PIN(103, "GPPC_H_20"),
-       PINCTRL_PIN(104, "GPPC_H_21"),
-       PINCTRL_PIN(105, "GPPC_H_22"),
-       PINCTRL_PIN(106, "GPPC_H_23"),
+       PINCTRL_PIN(92, "GPPC_H_0"),
+       PINCTRL_PIN(93, "SD_PWR_EN_B"),
+       PINCTRL_PIN(94, "MODEM_CLKREQ"),
+       PINCTRL_PIN(95, "SX_EXIT_HOLDOFFB"),
+       PINCTRL_PIN(96, "I2C2_SDA"),
+       PINCTRL_PIN(97, "I2C2_SCL"),
+       PINCTRL_PIN(98, "I2C3_SDA"),
+       PINCTRL_PIN(99, "I2C3_SCL"),
+       PINCTRL_PIN(100, "I2C4_SDA"),
+       PINCTRL_PIN(101, "I2C4_SCL"),
+       PINCTRL_PIN(102, "CPU_VCCIO_PWR_GATEB"),
+       PINCTRL_PIN(103, "I2S2_SCLK"),
+       PINCTRL_PIN(104, "I2S2_SFRM"),
+       PINCTRL_PIN(105, "I2S2_TXD"),
+       PINCTRL_PIN(106, "I2S2_RXD"),
+       PINCTRL_PIN(107, "I2S1_SCLK"),
+       PINCTRL_PIN(108, "GPPC_H_16"),
+       PINCTRL_PIN(109, "GPPC_H_17"),
+       PINCTRL_PIN(110, "GPPC_H_18"),
+       PINCTRL_PIN(111, "GPPC_H_19"),
+       PINCTRL_PIN(112, "GPPC_H_20"),
+       PINCTRL_PIN(113, "GPPC_H_21"),
+       PINCTRL_PIN(114, "GPPC_H_22"),
+       PINCTRL_PIN(115, "GPPC_H_23"),
        /* GPP_D */
-       PINCTRL_PIN(107, "SPI1_CSB"),
-       PINCTRL_PIN(108, "SPI1_CLK"),
-       PINCTRL_PIN(109, "SPI1_MISO_IO_1"),
-       PINCTRL_PIN(110, "SPI1_MOSI_IO_0"),
-       PINCTRL_PIN(111, "ISH_I2C0_SDA"),
-       PINCTRL_PIN(112, "ISH_I2C0_SCL"),
-       PINCTRL_PIN(113, "ISH_I2C1_SDA"),
-       PINCTRL_PIN(114, "ISH_I2C1_SCL"),
-       PINCTRL_PIN(115, "ISH_SPI_CSB"),
-       PINCTRL_PIN(116, "ISH_SPI_CLK"),
-       PINCTRL_PIN(117, "ISH_SPI_MISO"),
-       PINCTRL_PIN(118, "ISH_SPI_MOSI"),
-       PINCTRL_PIN(119, "ISH_UART0_RXD"),
-       PINCTRL_PIN(120, "ISH_UART0_TXD"),
-       PINCTRL_PIN(121, "ISH_UART0_RTSB"),
-       PINCTRL_PIN(122, "ISH_UART0_CTSB"),
-       PINCTRL_PIN(123, "SPI1_IO_2"),
-       PINCTRL_PIN(124, "SPI1_IO_3"),
-       PINCTRL_PIN(125, "I2S_MCLK"),
-       PINCTRL_PIN(126, "CNV_MFUART2_RXD"),
-       PINCTRL_PIN(127, "CNV_MFUART2_TXD"),
-       PINCTRL_PIN(128, "CNV_PA_BLANKING"),
-       PINCTRL_PIN(129, "I2C5_SDA"),
-       PINCTRL_PIN(130, "I2C5_SCL"),
-       PINCTRL_PIN(131, "GSPI2_CLK_LOOPBK"),
-       PINCTRL_PIN(132, "SPI1_CLK_LOOPBK"),
+       PINCTRL_PIN(116, "SPI1_CSB"),
+       PINCTRL_PIN(117, "SPI1_CLK"),
+       PINCTRL_PIN(118, "SPI1_MISO_IO_1"),
+       PINCTRL_PIN(119, "SPI1_MOSI_IO_0"),
+       PINCTRL_PIN(120, "ISH_I2C0_SDA"),
+       PINCTRL_PIN(121, "ISH_I2C0_SCL"),
+       PINCTRL_PIN(122, "ISH_I2C1_SDA"),
+       PINCTRL_PIN(123, "ISH_I2C1_SCL"),
+       PINCTRL_PIN(124, "ISH_SPI_CSB"),
+       PINCTRL_PIN(125, "ISH_SPI_CLK"),
+       PINCTRL_PIN(126, "ISH_SPI_MISO"),
+       PINCTRL_PIN(127, "ISH_SPI_MOSI"),
+       PINCTRL_PIN(128, "ISH_UART0_RXD"),
+       PINCTRL_PIN(129, "ISH_UART0_TXD"),
+       PINCTRL_PIN(130, "ISH_UART0_RTSB"),
+       PINCTRL_PIN(131, "ISH_UART0_CTSB"),
+       PINCTRL_PIN(132, "SPI1_IO_2"),
+       PINCTRL_PIN(133, "SPI1_IO_3"),
+       PINCTRL_PIN(134, "I2S_MCLK"),
+       PINCTRL_PIN(135, "CNV_MFUART2_RXD"),
+       PINCTRL_PIN(136, "CNV_MFUART2_TXD"),
+       PINCTRL_PIN(137, "CNV_PA_BLANKING"),
+       PINCTRL_PIN(138, "I2C5_SDA"),
+       PINCTRL_PIN(139, "I2C5_SCL"),
+       PINCTRL_PIN(140, "GSPI2_CLK_LOOPBK"),
+       PINCTRL_PIN(141, "SPI1_CLK_LOOPBK"),
        /* vGPIO */
-       PINCTRL_PIN(133, "CNV_BTEN"),
-       PINCTRL_PIN(134, "CNV_WCEN"),
-       PINCTRL_PIN(135, "CNV_BT_HOST_WAKEB"),
-       PINCTRL_PIN(136, "CNV_BT_IF_SELECT"),
-       PINCTRL_PIN(137, "vCNV_BT_UART_TXD"),
-       PINCTRL_PIN(138, "vCNV_BT_UART_RXD"),
-       PINCTRL_PIN(139, "vCNV_BT_UART_CTS_B"),
-       PINCTRL_PIN(140, "vCNV_BT_UART_RTS_B"),
-       PINCTRL_PIN(141, "vCNV_MFUART1_TXD"),
-       PINCTRL_PIN(142, "vCNV_MFUART1_RXD"),
-       PINCTRL_PIN(143, "vCNV_MFUART1_CTS_B"),
-       PINCTRL_PIN(144, "vCNV_MFUART1_RTS_B"),
-       PINCTRL_PIN(145, "vUART0_TXD"),
-       PINCTRL_PIN(146, "vUART0_RXD"),
-       PINCTRL_PIN(147, "vUART0_CTS_B"),
-       PINCTRL_PIN(148, "vUART0_RTS_B"),
-       PINCTRL_PIN(149, "vISH_UART0_TXD"),
-       PINCTRL_PIN(150, "vISH_UART0_RXD"),
-       PINCTRL_PIN(151, "vISH_UART0_CTS_B"),
-       PINCTRL_PIN(152, "vISH_UART0_RTS_B"),
-       PINCTRL_PIN(153, "vCNV_BT_I2S_BCLK"),
-       PINCTRL_PIN(154, "vCNV_BT_I2S_WS_SYNC"),
-       PINCTRL_PIN(155, "vCNV_BT_I2S_SDO"),
-       PINCTRL_PIN(156, "vCNV_BT_I2S_SDI"),
-       PINCTRL_PIN(157, "vI2S2_SCLK"),
-       PINCTRL_PIN(158, "vI2S2_SFRM"),
-       PINCTRL_PIN(159, "vI2S2_TXD"),
-       PINCTRL_PIN(160, "vI2S2_RXD"),
-       PINCTRL_PIN(161, "vSD3_CD_B"),
+       PINCTRL_PIN(142, "CNV_BTEN"),
+       PINCTRL_PIN(143, "CNV_WCEN"),
+       PINCTRL_PIN(144, "CNV_BT_HOST_WAKEB"),
+       PINCTRL_PIN(145, "CNV_BT_IF_SELECT"),
+       PINCTRL_PIN(146, "vCNV_BT_UART_TXD"),
+       PINCTRL_PIN(147, "vCNV_BT_UART_RXD"),
+       PINCTRL_PIN(148, "vCNV_BT_UART_CTS_B"),
+       PINCTRL_PIN(149, "vCNV_BT_UART_RTS_B"),
+       PINCTRL_PIN(150, "vCNV_MFUART1_TXD"),
+       PINCTRL_PIN(151, "vCNV_MFUART1_RXD"),
+       PINCTRL_PIN(152, "vCNV_MFUART1_CTS_B"),
+       PINCTRL_PIN(153, "vCNV_MFUART1_RTS_B"),
+       PINCTRL_PIN(154, "vUART0_TXD"),
+       PINCTRL_PIN(155, "vUART0_RXD"),
+       PINCTRL_PIN(156, "vUART0_CTS_B"),
+       PINCTRL_PIN(157, "vUART0_RTS_B"),
+       PINCTRL_PIN(158, "vISH_UART0_TXD"),
+       PINCTRL_PIN(159, "vISH_UART0_RXD"),
+       PINCTRL_PIN(160, "vISH_UART0_CTS_B"),
+       PINCTRL_PIN(161, "vISH_UART0_RTS_B"),
+       PINCTRL_PIN(162, "vCNV_BT_I2S_BCLK"),
+       PINCTRL_PIN(163, "vCNV_BT_I2S_WS_SYNC"),
+       PINCTRL_PIN(164, "vCNV_BT_I2S_SDO"),
+       PINCTRL_PIN(165, "vCNV_BT_I2S_SDI"),
+       PINCTRL_PIN(166, "vI2S2_SCLK"),
+       PINCTRL_PIN(167, "vI2S2_SFRM"),
+       PINCTRL_PIN(168, "vI2S2_TXD"),
+       PINCTRL_PIN(169, "vI2S2_RXD"),
+       PINCTRL_PIN(170, "vSD3_CD_B"),
        /* GPP_C */
-       PINCTRL_PIN(162, "GPPC_C_0"),
-       PINCTRL_PIN(163, "GPPC_C_1"),
-       PINCTRL_PIN(164, "GPPC_C_2"),
-       PINCTRL_PIN(165, "GPPC_C_3"),
-       PINCTRL_PIN(166, "GPPC_C_4"),
-       PINCTRL_PIN(167, "GPPC_C_5"),
-       PINCTRL_PIN(168, "SUSWARNB_SUSPWRDNACK"),
-       PINCTRL_PIN(169, "SUSACKB"),
-       PINCTRL_PIN(170, "UART0_RXD"),
-       PINCTRL_PIN(171, "UART0_TXD"),
-       PINCTRL_PIN(172, "UART0_RTSB"),
-       PINCTRL_PIN(173, "UART0_CTSB"),
-       PINCTRL_PIN(174, "UART1_RXD"),
-       PINCTRL_PIN(175, "UART1_TXD"),
-       PINCTRL_PIN(176, "UART1_RTSB"),
-       PINCTRL_PIN(177, "UART1_CTSB"),
-       PINCTRL_PIN(178, "I2C0_SDA"),
-       PINCTRL_PIN(179, "I2C0_SCL"),
-       PINCTRL_PIN(180, "I2C1_SDA"),
-       PINCTRL_PIN(181, "I2C1_SCL"),
-       PINCTRL_PIN(182, "UART2_RXD"),
-       PINCTRL_PIN(183, "UART2_TXD"),
-       PINCTRL_PIN(184, "UART2_RTSB"),
-       PINCTRL_PIN(185, "UART2_CTSB"),
+       PINCTRL_PIN(171, "GPPC_C_0"),
+       PINCTRL_PIN(172, "GPPC_C_1"),
+       PINCTRL_PIN(173, "GPPC_C_2"),
+       PINCTRL_PIN(174, "GPPC_C_3"),
+       PINCTRL_PIN(175, "GPPC_C_4"),
+       PINCTRL_PIN(176, "GPPC_C_5"),
+       PINCTRL_PIN(177, "SUSWARNB_SUSPWRDNACK"),
+       PINCTRL_PIN(178, "SUSACKB"),
+       PINCTRL_PIN(179, "UART0_RXD"),
+       PINCTRL_PIN(180, "UART0_TXD"),
+       PINCTRL_PIN(181, "UART0_RTSB"),
+       PINCTRL_PIN(182, "UART0_CTSB"),
+       PINCTRL_PIN(183, "UART1_RXD"),
+       PINCTRL_PIN(184, "UART1_TXD"),
+       PINCTRL_PIN(185, "UART1_RTSB"),
+       PINCTRL_PIN(186, "UART1_CTSB"),
+       PINCTRL_PIN(187, "I2C0_SDA"),
+       PINCTRL_PIN(188, "I2C0_SCL"),
+       PINCTRL_PIN(189, "I2C1_SDA"),
+       PINCTRL_PIN(190, "I2C1_SCL"),
+       PINCTRL_PIN(191, "UART2_RXD"),
+       PINCTRL_PIN(192, "UART2_TXD"),
+       PINCTRL_PIN(193, "UART2_RTSB"),
+       PINCTRL_PIN(194, "UART2_CTSB"),
        /* HVCMOS */
-       PINCTRL_PIN(186, "L_BKLTEN"),
-       PINCTRL_PIN(187, "L_BKLTCTL"),
-       PINCTRL_PIN(188, "L_VDDEN"),
-       PINCTRL_PIN(189, "SYS_PWROK"),
-       PINCTRL_PIN(190, "SYS_RESETB"),
-       PINCTRL_PIN(191, "MLK_RSTB"),
+       PINCTRL_PIN(195, "L_BKLTEN"),
+       PINCTRL_PIN(196, "L_BKLTCTL"),
+       PINCTRL_PIN(197, "L_VDDEN"),
+       PINCTRL_PIN(198, "SYS_PWROK"),
+       PINCTRL_PIN(199, "SYS_RESETB"),
+       PINCTRL_PIN(200, "MLK_RSTB"),
        /* GPP_E */
-       PINCTRL_PIN(192, "ISH_GP_0"),
-       PINCTRL_PIN(193, "ISH_GP_1"),
-       PINCTRL_PIN(194, "IMGCLKOUT_1"),
-       PINCTRL_PIN(195, "ISH_GP_2"),
-       PINCTRL_PIN(196, "IMGCLKOUT_2"),
-       PINCTRL_PIN(197, "SATA_LEDB"),
-       PINCTRL_PIN(198, "IMGCLKOUT_3"),
-       PINCTRL_PIN(199, "ISH_GP_3"),
-       PINCTRL_PIN(200, "ISH_GP_4"),
-       PINCTRL_PIN(201, "ISH_GP_5"),
-       PINCTRL_PIN(202, "ISH_GP_6"),
-       PINCTRL_PIN(203, "ISH_GP_7"),
-       PINCTRL_PIN(204, "IMGCLKOUT_4"),
-       PINCTRL_PIN(205, "DDPA_CTRLCLK"),
-       PINCTRL_PIN(206, "DDPA_CTRLDATA"),
-       PINCTRL_PIN(207, "DDPB_CTRLCLK"),
-       PINCTRL_PIN(208, "DDPB_CTRLDATA"),
-       PINCTRL_PIN(209, "DDPC_CTRLCLK"),
-       PINCTRL_PIN(210, "DDPC_CTRLDATA"),
-       PINCTRL_PIN(211, "IMGCLKOUT_5"),
-       PINCTRL_PIN(212, "CNV_BRI_DT"),
-       PINCTRL_PIN(213, "CNV_BRI_RSP"),
-       PINCTRL_PIN(214, "CNV_RGI_DT"),
-       PINCTRL_PIN(215, "CNV_RGI_RSP"),
+       PINCTRL_PIN(201, "ISH_GP_0"),
+       PINCTRL_PIN(202, "ISH_GP_1"),
+       PINCTRL_PIN(203, "IMGCLKOUT_1"),
+       PINCTRL_PIN(204, "ISH_GP_2"),
+       PINCTRL_PIN(205, "IMGCLKOUT_2"),
+       PINCTRL_PIN(206, "SATA_LEDB"),
+       PINCTRL_PIN(207, "IMGCLKOUT_3"),
+       PINCTRL_PIN(208, "ISH_GP_3"),
+       PINCTRL_PIN(209, "ISH_GP_4"),
+       PINCTRL_PIN(210, "ISH_GP_5"),
+       PINCTRL_PIN(211, "ISH_GP_6"),
+       PINCTRL_PIN(212, "ISH_GP_7"),
+       PINCTRL_PIN(213, "IMGCLKOUT_4"),
+       PINCTRL_PIN(214, "DDPA_CTRLCLK"),
+       PINCTRL_PIN(215, "DDPA_CTRLDATA"),
+       PINCTRL_PIN(216, "DDPB_CTRLCLK"),
+       PINCTRL_PIN(217, "DDPB_CTRLDATA"),
+       PINCTRL_PIN(218, "DDPC_CTRLCLK"),
+       PINCTRL_PIN(219, "DDPC_CTRLDATA"),
+       PINCTRL_PIN(220, "IMGCLKOUT_5"),
+       PINCTRL_PIN(221, "CNV_BRI_DT"),
+       PINCTRL_PIN(222, "CNV_BRI_RSP"),
+       PINCTRL_PIN(223, "CNV_RGI_DT"),
+       PINCTRL_PIN(224, "CNV_RGI_RSP"),
        /* GPP_G */
-       PINCTRL_PIN(216, "SD3_CMD"),
-       PINCTRL_PIN(217, "SD3_D0"),
-       PINCTRL_PIN(218, "SD3_D1"),
-       PINCTRL_PIN(219, "SD3_D2"),
-       PINCTRL_PIN(220, "SD3_D3"),
-       PINCTRL_PIN(221, "SD3_CDB"),
-       PINCTRL_PIN(222, "SD3_CLK"),
-       PINCTRL_PIN(223, "SD3_WP"),
+       PINCTRL_PIN(225, "SD3_CMD"),
+       PINCTRL_PIN(226, "SD3_D0"),
+       PINCTRL_PIN(227, "SD3_D1"),
+       PINCTRL_PIN(228, "SD3_D2"),
+       PINCTRL_PIN(229, "SD3_D3"),
+       PINCTRL_PIN(230, "SD3_CDB"),
+       PINCTRL_PIN(231, "SD3_CLK"),
+       PINCTRL_PIN(232, "SD3_WP"),
 };
 
 static const struct intel_padgroup jsl_community0_gpps[] = {
        JSL_GPP(0, 0, 19, 320),                         /* GPP_F */
-       JSL_GPP(1, 20, 45, 32),                         /* GPP_B */
-       JSL_GPP(2, 46, 66, 64),                         /* GPP_A */
-       JSL_GPP(3, 67, 74, 96),                         /* GPP_S */
-       JSL_GPP(4, 75, 82, 128),                        /* GPP_R */
+       JSL_GPP(1, 20, 28, INTEL_GPIO_BASE_NOMAP),      /* SPI */
+       JSL_GPP(2, 29, 54, 32),                         /* GPP_B */
+       JSL_GPP(3, 55, 75, 64),                         /* GPP_A */
+       JSL_GPP(4, 76, 83, 96),                         /* GPP_S */
+       JSL_GPP(5, 84, 91, 128),                        /* GPP_R */
 };
 
 static const struct intel_padgroup jsl_community1_gpps[] = {
-       JSL_GPP(0, 83, 106, 160),                       /* GPP_H */
-       JSL_GPP(1, 107, 132, 192),                      /* GPP_D */
-       JSL_GPP(2, 133, 161, 224),                      /* vGPIO */
-       JSL_GPP(3, 162, 185, 256),                      /* GPP_C */
+       JSL_GPP(0, 92, 115, 160),                       /* GPP_H */
+       JSL_GPP(1, 116, 141, 192),                      /* GPP_D */
+       JSL_GPP(2, 142, 170, 224),                      /* vGPIO */
+       JSL_GPP(3, 171, 194, 256),                      /* GPP_C */
 };
 
 static const struct intel_padgroup jsl_community4_gpps[] = {
-       JSL_GPP(0, 186, 191, INTEL_GPIO_BASE_NOMAP),    /* HVCMOS */
-       JSL_GPP(1, 192, 215, 288),                      /* GPP_E */
+       JSL_GPP(0, 195, 200, INTEL_GPIO_BASE_NOMAP),    /* HVCMOS */
+       JSL_GPP(1, 201, 224, 288),                      /* GPP_E */
 };
 
 static const struct intel_padgroup jsl_community5_gpps[] = {
-       JSL_GPP(0, 216, 223, INTEL_GPIO_BASE_ZERO),     /* GPP_G */
+       JSL_GPP(0, 225, 232, INTEL_GPIO_BASE_ZERO),     /* GPP_G */
 };
 
 static const struct intel_community jsl_communities[] = {
-       JSL_COMMUNITY(0, 0, 82, jsl_community0_gpps),
-       JSL_COMMUNITY(1, 83, 185, jsl_community1_gpps),
-       JSL_COMMUNITY(2, 186, 215, jsl_community4_gpps),
-       JSL_COMMUNITY(3, 216, 223, jsl_community5_gpps),
+       JSL_COMMUNITY(0, 0, 91, jsl_community0_gpps),
+       JSL_COMMUNITY(1, 92, 194, jsl_community1_gpps),
+       JSL_COMMUNITY(2, 195, 224, jsl_community4_gpps),
+       JSL_COMMUNITY(3, 225, 232, jsl_community5_gpps),
 };
 
 static const struct intel_pinctrl_soc_data jsl_soc_data = {
@@ -336,7 +347,6 @@ static struct platform_driver jsl_pinctrl_driver = {
                .pm = &jsl_pinctrl_pm_ops,
        },
 };
-
 module_platform_driver(jsl_pinctrl_driver);
 
 MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
index e4ff8da..3ae141e 100644 (file)
@@ -745,6 +745,10 @@ static int mrfld_config_set_pin(struct mrfld_pinctrl *mp, unsigned int pin,
                mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK;
                bits |= BUFCFG_PU_EN;
 
+               /* Set default strength value in case none is given */
+               if (arg == 1)
+                       arg = 20000;
+
                switch (arg) {
                case 50000:
                        bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT;
@@ -765,6 +769,10 @@ static int mrfld_config_set_pin(struct mrfld_pinctrl *mp, unsigned int pin,
                mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK;
                bits |= BUFCFG_PD_EN;
 
+               /* Set default strength value in case none is given */
+               if (arg == 1)
+                       arg = 20000;
+
                switch (arg) {
                case 50000:
                        bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT;
index 4aea3e0..899c16c 100644 (file)
@@ -429,7 +429,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
                pin_reg &= ~BIT(LEVEL_TRIG_OFF);
                pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
                pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
-               pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
                irq_set_handler_locked(d, handle_edge_irq);
                break;
 
@@ -437,7 +436,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
                pin_reg &= ~BIT(LEVEL_TRIG_OFF);
                pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
                pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
-               pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
                irq_set_handler_locked(d, handle_edge_irq);
                break;
 
@@ -445,7 +443,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
                pin_reg &= ~BIT(LEVEL_TRIG_OFF);
                pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
                pin_reg |= BOTH_EADGE << ACTIVE_LEVEL_OFF;
-               pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
                irq_set_handler_locked(d, handle_edge_irq);
                break;
 
@@ -453,8 +450,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
                pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF;
                pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
                pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
-               pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
-               pin_reg |= DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF;
                irq_set_handler_locked(d, handle_level_irq);
                break;
 
@@ -462,8 +457,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
                pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF;
                pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
                pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
-               pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
-               pin_reg |= DB_TYPE_PRESERVE_HIGH_GLITCH << DB_CNTRL_OFF;
                irq_set_handler_locked(d, handle_level_irq);
                break;
 
index 41cd66f..e158d3d 100644 (file)
@@ -37,7 +37,6 @@
 #include <linux/poll.h>
 #include <linux/vmalloc.h>
 #include <linux/irq_poll.h>
-#include <linux/blk-mq-pci.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -114,10 +113,6 @@ unsigned int enable_sdev_max_qd;
 module_param(enable_sdev_max_qd, int, 0444);
 MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
 
-int host_tagset_enable = 1;
-module_param(host_tagset_enable, int, 0444);
-MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
-
 MODULE_LICENSE("GPL");
 MODULE_VERSION(MEGASAS_VERSION);
 MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
@@ -3124,19 +3119,6 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
        return 0;
 }
 
-static int megasas_map_queues(struct Scsi_Host *shost)
-{
-       struct megasas_instance *instance;
-
-       instance = (struct megasas_instance *)shost->hostdata;
-
-       if (shost->nr_hw_queues == 1)
-               return 0;
-
-       return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
-                       instance->pdev, instance->low_latency_index_start);
-}
-
 static void megasas_aen_polling(struct work_struct *work);
 
 /**
@@ -3445,7 +3427,6 @@ static struct scsi_host_template megasas_template = {
        .eh_timed_out = megasas_reset_timer,
        .shost_attrs = megaraid_host_attrs,
        .bios_param = megasas_bios_param,
-       .map_queues = megasas_map_queues,
        .change_queue_depth = scsi_change_queue_depth,
        .max_segment_size = 0xffffffff,
 };
@@ -6827,26 +6808,6 @@ static int megasas_io_attach(struct megasas_instance *instance)
        host->max_lun = MEGASAS_MAX_LUN;
        host->max_cmd_len = 16;
 
-       /* Use shared host tagset only for fusion adaptors
-        * if there are managed interrupts (smp affinity enabled case).
-        * Single msix_vectors in kdump, so shared host tag is also disabled.
-        */
-
-       host->host_tagset = 0;
-       host->nr_hw_queues = 1;
-
-       if ((instance->adapter_type != MFI_SERIES) &&
-               (instance->msix_vectors > instance->low_latency_index_start) &&
-               host_tagset_enable &&
-               instance->smp_affinity_enable) {
-               host->host_tagset = 1;
-               host->nr_hw_queues = instance->msix_vectors -
-                       instance->low_latency_index_start;
-       }
-
-       dev_info(&instance->pdev->dev,
-               "Max firmware commands: %d shared with nr_hw_queues = %d\n",
-               instance->max_fw_cmds, host->nr_hw_queues);
        /*
         * Notify the mid-layer about the new controller
         */
index fd60728..b0c01cf 100644 (file)
@@ -359,29 +359,24 @@ megasas_get_msix_index(struct megasas_instance *instance,
 {
        int sdev_busy;
 
-       /* TBD - if sml remove device_busy in future, driver
-        * should track counter in internal structure.
-        */
-       sdev_busy = atomic_read(&scmd->device->device_busy);
+       /* nr_hw_queue = 1 for MegaRAID */
+       struct blk_mq_hw_ctx *hctx =
+               scmd->device->request_queue->queue_hw_ctx[0];
+
+       sdev_busy = atomic_read(&hctx->nr_active);
 
        if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
-           sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) {
+           sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
                cmd->request_desc->SCSIIO.MSIxIndex =
                        mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
                                        MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
-       } else if (instance->msix_load_balance) {
+       else if (instance->msix_load_balance)
                cmd->request_desc->SCSIIO.MSIxIndex =
                        (mega_mod64(atomic64_add_return(1, &instance->total_io_count),
                                instance->msix_vectors));
-       } else if (instance->host->nr_hw_queues > 1) {
-               u32 tag = blk_mq_unique_tag(scmd->request);
-
-               cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) +
-                       instance->low_latency_index_start;
-       } else {
+       else
                cmd->request_desc->SCSIIO.MSIxIndex =
                        instance->reply_map[raw_smp_processor_id()];
-       }
 }
 
 /**
@@ -961,6 +956,9 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
        if (megasas_alloc_cmdlist_fusion(instance))
                goto fail_exit;
 
+       dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
+                instance->max_fw_cmds);
+
        /* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
        io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
        io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
@@ -1104,9 +1102,8 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
                MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing)
                instance->perf_mode = MR_BALANCED_PERF_MODE;
 
-       dev_info(&instance->pdev->dev, "Performance mode :%s (latency index = %d)\n",
-               MEGASAS_PERF_MODE_2STR(instance->perf_mode),
-               instance->low_latency_index_start);
+       dev_info(&instance->pdev->dev, "Performance mode :%s\n",
+               MEGASAS_PERF_MODE_2STR(instance->perf_mode));
 
        instance->fw_sync_cache_support = (scratch_pad_1 &
                MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
index 6c5900d..e38bb1e 100644 (file)
@@ -230,6 +230,9 @@ static int afs_parse_source(struct fs_context *fc, struct fs_parameter *param)
 
        _enter(",%s", name);
 
+       if (fc->source)
+               return invalf(fc, "kAFS: Multiple sources not supported");
+
        if (!name) {
                printk(KERN_ERR "kAFS: no volume name specified\n");
                return -EINVAL;
index 88e1763..e2a488d 100644 (file)
@@ -205,3 +205,12 @@ config NFS_DISABLE_UDP_SUPPORT
         Choose Y here to disable the use of NFS over UDP. NFS over UDP
         on modern networks (1Gb+) can lead to data corruption caused by
         fragmentation during high loads.
+
+config NFS_V4_2_READ_PLUS
+       bool "NFS: Enable support for the NFSv4.2 READ_PLUS operation"
+       depends on NFS_V4_2
+       default n
+       help
+        This is intended for developers only. The READ_PLUS operation has
+        been shown to have issues under specific conditions and should not
+        be used in production.
index a163533..24bf579 100644 (file)
@@ -838,7 +838,7 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
        struct nfs_pgio_mirror *pgm;
        struct nfs4_ff_layout_mirror *mirror;
        struct nfs4_pnfs_ds *ds;
-       u32 ds_idx, i;
+       u32 ds_idx;
 
 retry:
        ff_layout_pg_check_layout(pgio, req);
@@ -864,11 +864,9 @@ retry:
                goto retry;
        }
 
-       for (i = 0; i < pgio->pg_mirror_count; i++) {
-               mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
-               pgm = &pgio->pg_mirrors[i];
-               pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
-       }
+       mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
+       pgm = &pgio->pg_mirrors[0];
+       pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
 
        pgio->pg_mirror_idx = ds_idx;
 
@@ -985,6 +983,21 @@ out:
        return 1;
 }
 
+static u32
+ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
+{
+       u32 old = desc->pg_mirror_idx;
+
+       desc->pg_mirror_idx = idx;
+       return old;
+}
+
+static struct nfs_pgio_mirror *
+ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
+{
+       return &desc->pg_mirrors[idx];
+}
+
 static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
        .pg_init = ff_layout_pg_init_read,
        .pg_test = pnfs_generic_pg_test,
@@ -998,6 +1011,8 @@ static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
        .pg_doio = pnfs_generic_pg_writepages,
        .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
        .pg_cleanup = pnfs_generic_pg_cleanup,
+       .pg_get_mirror = ff_layout_pg_get_mirror_write,
+       .pg_set_mirror = ff_layout_pg_set_mirror_write,
 };
 
 static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
index 2b2211d..4fc61e3 100644 (file)
@@ -1241,12 +1241,13 @@ static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
                .rpc_resp       = &res,
        };
        u32 xdrlen;
-       int ret, np;
+       int ret, np, i;
 
 
+       ret = -ENOMEM;
        res.scratch = alloc_page(GFP_KERNEL);
        if (!res.scratch)
-               return -ENOMEM;
+               goto out;
 
        xdrlen = nfs42_listxattr_xdrsize(buflen);
        if (xdrlen > server->lxasize)
@@ -1254,9 +1255,12 @@ static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
        np = xdrlen / PAGE_SIZE + 1;
 
        pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL);
-       if (pages == NULL) {
-               __free_page(res.scratch);
-               return -ENOMEM;
+       if (!pages)
+               goto out_free_scratch;
+       for (i = 0; i < np; i++) {
+               pages[i] = alloc_page(GFP_KERNEL);
+               if (!pages[i])
+                       goto out_free_pages;
        }
 
        arg.xattr_pages = pages;
@@ -1271,14 +1275,15 @@ static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
                *eofp = res.eof;
        }
 
+out_free_pages:
        while (--np >= 0) {
                if (pages[np])
                        __free_page(pages[np]);
        }
-
-       __free_page(res.scratch);
        kfree(pages);
-
+out_free_scratch:
+       __free_page(res.scratch);
+out:
        return ret;
 
 }
index 6e060a8..8432bd6 100644 (file)
@@ -1528,7 +1528,6 @@ static void nfs4_xdr_enc_listxattrs(struct rpc_rqst *req,
 
        rpc_prepare_reply_pages(req, args->xattr_pages, 0, args->count,
            hdr.replen);
-       req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES;
 
        encode_nops(&hdr);
 }
index 9d354de..57b3821 100644 (file)
@@ -377,10 +377,10 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
                goto out_stateowner;
 
        set_bit(NFS_SRV_SSC_COPY_STATE, &ctx->state->flags);
-       set_bit(NFS_OPEN_STATE, &ctx->state->flags);
        memcpy(&ctx->state->open_stateid.other, &stateid->other,
               NFS4_STATEID_OTHER_SIZE);
        update_open_stateid(ctx->state, stateid, NULL, filep->f_mode);
+       set_bit(NFS_OPEN_STATE, &ctx->state->flags);
 
        nfs_file_set_open_context(filep, ctx);
        put_nfs_open_context(ctx);
index 9e0ca9b..e894686 100644 (file)
@@ -5309,7 +5309,7 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
                                    nfs4_read_done_cb(task, hdr);
 }
 
-#ifdef CONFIG_NFS_V4_2
+#if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS
 static void nfs42_read_plus_support(struct nfs_server *server, struct rpc_message *msg)
 {
        if (server->caps & NFS_CAP_READ_PLUS)
index 6985cac..78c9c4b 100644 (file)
 static struct kmem_cache *nfs_page_cachep;
 static const struct rpc_call_ops nfs_pgio_common_ops;
 
+static struct nfs_pgio_mirror *
+nfs_pgio_get_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
+{
+       if (desc->pg_ops->pg_get_mirror)
+               return desc->pg_ops->pg_get_mirror(desc, idx);
+       return &desc->pg_mirrors[0];
+}
+
 struct nfs_pgio_mirror *
 nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
 {
-       return &desc->pg_mirrors[desc->pg_mirror_idx];
+       return nfs_pgio_get_mirror(desc, desc->pg_mirror_idx);
 }
 EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);
 
+static u32
+nfs_pgio_set_current_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
+{
+       if (desc->pg_ops->pg_set_mirror)
+               return desc->pg_ops->pg_set_mirror(desc, idx);
+       return desc->pg_mirror_idx;
+}
+
 void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
                       struct nfs_pgio_header *hdr,
                       void (*release)(struct nfs_pgio_header *hdr))
@@ -1259,7 +1275,7 @@ static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
                return;
 
        for (midx = 0; midx < desc->pg_mirror_count; midx++) {
-               mirror = &desc->pg_mirrors[midx];
+               mirror = nfs_pgio_get_mirror(desc, midx);
                desc->pg_completion_ops->error_cleanup(&mirror->pg_list,
                                desc->pg_error);
        }
@@ -1293,12 +1309,12 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                        goto out_failed;
                }
 
-               desc->pg_mirror_idx = midx;
+               nfs_pgio_set_current_mirror(desc, midx);
                if (!nfs_pageio_add_request_mirror(desc, dupreq))
                        goto out_cleanup_subreq;
        }
 
-       desc->pg_mirror_idx = 0;
+       nfs_pgio_set_current_mirror(desc, 0);
        if (!nfs_pageio_add_request_mirror(desc, req))
                goto out_failed;
 
@@ -1320,10 +1336,12 @@ out_failed:
 static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
                                       u32 mirror_idx)
 {
-       struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx];
-       u32 restore_idx = desc->pg_mirror_idx;
+       struct nfs_pgio_mirror *mirror;
+       u32 restore_idx;
+
+       restore_idx = nfs_pgio_set_current_mirror(desc, mirror_idx);
+       mirror = nfs_pgio_current_mirror(desc);
 
-       desc->pg_mirror_idx = mirror_idx;
        for (;;) {
                nfs_pageio_doio(desc);
                if (desc->pg_error < 0 || !mirror->pg_recoalesce)
@@ -1331,7 +1349,7 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
                if (!nfs_do_recoalesce(desc))
                        break;
        }
-       desc->pg_mirror_idx = restore_idx;
+       nfs_pgio_set_current_mirror(desc, restore_idx);
 }
 
 /*
@@ -1405,7 +1423,7 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
        u32 midx;
 
        for (midx = 0; midx < desc->pg_mirror_count; midx++) {
-               mirror = &desc->pg_mirrors[midx];
+               mirror = nfs_pgio_get_mirror(desc, midx);
                if (!list_empty(&mirror->pg_list)) {
                        prev = nfs_list_entry(mirror->pg_list.prev);
                        if (index != prev->wb_index + 1) {
index 217aa27..ee5a235 100644 (file)
@@ -1599,11 +1599,15 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 
        src = *ppos;
        svpfn = src / PM_ENTRY_BYTES;
-       start_vaddr = svpfn << PAGE_SHIFT;
        end_vaddr = mm->task_size;
 
        /* watch out for wraparound */
-       if (svpfn > mm->task_size >> PAGE_SHIFT)
+       start_vaddr = end_vaddr;
+       if (svpfn <= (ULONG_MAX >> PAGE_SHIFT))
+               start_vaddr = untagged_addr(svpfn << PAGE_SHIFT);
+
+       /* Ensure the address is inside the task */
+       if (start_vaddr > mm->task_size)
                start_vaddr = end_vaddr;
 
        /*
index 3b20e21..03a369c 100644 (file)
@@ -168,12 +168,14 @@ EXPORT_SYMBOL(seq_read);
 ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
        struct seq_file *m = iocb->ki_filp->private_data;
-       size_t size = iov_iter_count(iter);
        size_t copied = 0;
        size_t n;
        void *p;
        int err = 0;
 
+       if (!iov_iter_count(iter))
+               return 0;
+
        mutex_lock(&m->lock);
 
        /*
@@ -206,36 +208,34 @@ ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter)
                if (!m->buf)
                        goto Enomem;
        }
-       /* if not empty - flush it first */
+       // something left in the buffer - copy it out first
        if (m->count) {
-               n = min(m->count, size);
-               if (copy_to_iter(m->buf + m->from, n, iter) != n)
-                       goto Efault;
+               n = copy_to_iter(m->buf + m->from, m->count, iter);
                m->count -= n;
                m->from += n;
-               size -= n;
                copied += n;
-               if (!size)
+               if (m->count)   // hadn't managed to copy everything
                        goto Done;
        }
-       /* we need at least one record in buffer */
+       // get a non-empty record in the buffer
        m->from = 0;
        p = m->op->start(m, &m->index);
        while (1) {
                err = PTR_ERR(p);
-               if (!p || IS_ERR(p))
+               if (!p || IS_ERR(p))    // EOF or an error
                        break;
                err = m->op->show(m, p);
-               if (err < 0)
+               if (err < 0)            // hard error
                        break;
-               if (unlikely(err))
+               if (unlikely(err))      // ->show() says "skip it"
                        m->count = 0;
-               if (unlikely(!m->count)) {
+               if (unlikely(!m->count)) { // empty record
                        p = m->op->next(m, p, &m->index);
                        continue;
                }
-               if (m->count < m->size)
+               if (!seq_has_overflowed(m)) // got it
                        goto Fill;
+               // need a bigger buffer
                m->op->stop(m, p);
                kvfree(m->buf);
                m->count = 0;
@@ -244,11 +244,14 @@ ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter)
                        goto Enomem;
                p = m->op->start(m, &m->index);
        }
+       // EOF or an error
        m->op->stop(m, p);
        m->count = 0;
        goto Done;
 Fill:
-       /* they want more? let's try to get some more */
+       // one non-empty record is in the buffer; if they want more,
+       // try to fit more in, but in any case we need to advance
+       // the iterator once for every record shown.
        while (1) {
                size_t offs = m->count;
                loff_t pos = m->index;
@@ -259,30 +262,27 @@ Fill:
                                            m->op->next);
                        m->index++;
                }
-               if (!p || IS_ERR(p)) {
-                       err = PTR_ERR(p);
+               if (!p || IS_ERR(p))    // no next record for us
                        break;
-               }
-               if (m->count >= size)
+               if (m->count >= iov_iter_count(iter))
                        break;
                err = m->op->show(m, p);
-               if (seq_has_overflowed(m) || err) {
+               if (err > 0) {          // ->show() says "skip it"
                        m->count = offs;
-                       if (likely(err <= 0))
-                               break;
+               } else if (err || seq_has_overflowed(m)) {
+                       m->count = offs;
+                       break;
                }
        }
        m->op->stop(m, p);
-       n = min(m->count, size);
-       if (copy_to_iter(m->buf, n, iter) != n)
-               goto Efault;
+       n = copy_to_iter(m->buf, m->count, iter);
        copied += n;
        m->count -= n;
        m->from = n;
 Done:
-       if (!copied)
-               copied = err;
-       else {
+       if (unlikely(!copied)) {
+               copied = m->count ? -EFAULT : err;
+       } else {
                iocb->ki_pos += copied;
                m->read_pos += copied;
        }
@@ -291,9 +291,6 @@ Done:
 Enomem:
        err = -ENOMEM;
        goto Done;
-Efault:
-       err = -EFAULT;
-       goto Done;
 }
 EXPORT_SYMBOL(seq_read_iter);
 
index e3a0be2..7bb66e1 100644 (file)
@@ -77,4 +77,9 @@
 #define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
 #define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
 
+#ifdef __GENKSYMS__
+/* genksyms gets confused by _Static_assert */
+#define _Static_assert(expr, ...)
+#endif
+
 #endif /* _LINUX_BUILD_BUG_H */
index 46c3d69..de51c1b 100644 (file)
@@ -104,6 +104,7 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg
 #endif
 }
 
+#if defined(CONFIG_UM) || defined(CONFIG_IA64)
 /*
  * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
  * extra segments containing the gate DSO contents.  Dumping its
@@ -118,5 +119,26 @@ elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
 extern int
 elf_core_write_extra_data(struct coredump_params *cprm);
 extern size_t elf_core_extra_data_size(void);
+#else
+static inline Elf_Half elf_core_extra_phdrs(void)
+{
+       return 0;
+}
+
+static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
+{
+       return 1;
+}
+
+static inline int elf_core_write_extra_data(struct coredump_params *cprm)
+{
+       return 1;
+}
+
+static inline size_t elf_core_extra_data_size(void)
+{
+       return 0;
+}
+#endif
 
 #endif /* _LINUX_ELFCORE_H */
index 5deb099..8ebb641 100644 (file)
@@ -227,7 +227,7 @@ struct xt_table {
        unsigned int valid_hooks;
 
        /* Man behind the curtain... */
-       struct xt_table_info *private;
+       struct xt_table_info __rcu *private;
 
        /* Set this to THIS_MODULE if you are a module, otherwise NULL */
        struct module *me;
@@ -448,6 +448,9 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
 
 struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
 
+struct xt_table_info
+*xt_table_get_private_protected(const struct xt_table *table);
+
 #ifdef CONFIG_COMPAT
 #include <net/compat.h>
 
index c32c152..f0373a6 100644 (file)
@@ -55,6 +55,7 @@ struct nfs_page {
        unsigned short          wb_nio;         /* Number of I/O attempts */
 };
 
+struct nfs_pgio_mirror;
 struct nfs_pageio_descriptor;
 struct nfs_pageio_ops {
        void    (*pg_init)(struct nfs_pageio_descriptor *, struct nfs_page *);
@@ -64,6 +65,9 @@ struct nfs_pageio_ops {
        unsigned int    (*pg_get_mirror_count)(struct nfs_pageio_descriptor *,
                                       struct nfs_page *);
        void    (*pg_cleanup)(struct nfs_pageio_descriptor *);
+       struct nfs_pgio_mirror *
+               (*pg_get_mirror)(struct nfs_pageio_descriptor *, u32);
+       u32     (*pg_set_mirror)(struct nfs_pageio_descriptor *, u32);
 };
 
 struct nfs_rw_ops {
index bc27254..3964262 100644 (file)
@@ -869,7 +869,7 @@ static inline int security_inode_killpriv(struct dentry *dentry)
 
 static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc)
 {
-       return -EOPNOTSUPP;
+       return cap_inode_getsecurity(inode, name, buffer, alloc);
 }
 
 static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
index 628e289..15ca6b4 100644 (file)
@@ -170,6 +170,7 @@ struct plat_stmmacenet_data {
        int unicast_filter_entries;
        int tx_fifo_size;
        int rx_fifo_size;
+       u32 addr64;
        u32 rx_queues_to_use;
        u32 tx_queues_to_use;
        u8 rx_sched_algorithm;
index d9d0ff3..adc3da7 100644 (file)
 #define bond_for_each_slave_rcu(bond, pos, iter) \
        netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
 
-#ifdef CONFIG_XFRM_OFFLOAD
 #define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \
                            NETIF_F_GSO_ESP)
-#endif /* CONFIG_XFRM_OFFLOAD */
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 extern atomic_t netpoll_block_tx;
index 55b4cad..c1c0a4f 100644 (file)
@@ -1524,4 +1524,8 @@ void __init nft_chain_route_init(void);
 void nft_chain_route_fini(void);
 
 void nf_tables_trans_destroy_flush_work(void);
+
+int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result);
+__be64 nf_jiffies64_to_msecs(u64 input);
+
 #endif /* _NET_NF_TABLES_H */
index 3814fb6..9dab2bc 100644 (file)
@@ -240,8 +240,6 @@ struct xdp_attachment_info {
 };
 
 struct netdev_bpf;
-bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
-                            struct netdev_bpf *bpf);
 void xdp_attachment_setup(struct xdp_attachment_info *info,
                          struct netdev_bpf *bpf);
 
index 1e9db95..49b46df 100644 (file)
@@ -618,6 +618,9 @@ struct ocelot {
        /* Keep track of the vlan port masks */
        u32                             vlan_mask[VLAN_N_VID];
 
+       /* Switches like VSC9959 have flooding per traffic class */
+       int                             num_flooding_pgids;
+
        /* In tables like ANA:PORT and the ANA:PGID:PGID mask,
         * the CPU is located after the physical ports (at the
         * num_phys_ports index).
index e6ceac3..556216d 100644 (file)
@@ -3897,8 +3897,8 @@ union bpf_attr {
        FN(seq_printf_btf),             \
        FN(skb_cgroup_classid),         \
        FN(redirect_neigh),             \
-       FN(bpf_per_cpu_ptr),            \
-       FN(bpf_this_cpu_ptr),           \
+       FN(per_cpu_ptr),                \
+       FN(this_cpu_ptr),               \
        FN(redirect_peer),              \
        /* */
 
index 1f97c03..55b74d7 100644 (file)
@@ -535,7 +535,7 @@ extern unsigned long __initramfs_size;
 #include <linux/initrd.h>
 #include <linux/kexec.h>
 
-void __weak free_initrd_mem(unsigned long start, unsigned long end)
+void __weak __init free_initrd_mem(unsigned long start, unsigned long end)
 {
 #ifdef CONFIG_ARCH_KEEP_MEMBLOCK
        unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE);
index af601b9..6c9f199 100644 (file)
@@ -97,7 +97,6 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
 obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
 obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
 obj-$(CONFIG_LATENCYTOP) += latencytop.o
-obj-$(CONFIG_ELFCORE) += elfcore.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_TRACE_CLOCK) += trace/
index 25520f5..deda118 100644 (file)
@@ -717,9 +717,9 @@ bpf_base_func_proto(enum bpf_func_id func_id)
                return &bpf_snprintf_btf_proto;
        case BPF_FUNC_jiffies64:
                return &bpf_jiffies64_proto;
-       case BPF_FUNC_bpf_per_cpu_ptr:
+       case BPF_FUNC_per_cpu_ptr:
                return &bpf_per_cpu_ptr_proto;
-       case BPF_FUNC_bpf_this_cpu_ptr:
+       case BPF_FUNC_this_cpu_ptr:
                return &bpf_this_cpu_ptr_proto;
        default:
                break;
index 1388bf7..53fe6ef 100644 (file)
@@ -1298,9 +1298,7 @@ static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
 
 static bool __reg64_bound_s32(s64 a)
 {
-       if (a > S32_MIN && a < S32_MAX)
-               return true;
-       return false;
+       return a > S32_MIN && a < S32_MAX;
 }
 
 static bool __reg64_bound_u32(u64 a)
@@ -1314,10 +1312,10 @@ static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
 {
        __mark_reg32_unbounded(reg);
 
-       if (__reg64_bound_s32(reg->smin_value))
+       if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
                reg->s32_min_value = (s32)reg->smin_value;
-       if (__reg64_bound_s32(reg->smax_value))
                reg->s32_max_value = (s32)reg->smax_value;
+       }
        if (__reg64_bound_u32(reg->umin_value))
                reg->u32_min_value = (u32)reg->umin_value;
        if (__reg64_bound_u32(reg->umax_value))
@@ -4895,6 +4893,8 @@ static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
 
        ret_reg->smax_value = meta->msize_max_value;
        ret_reg->s32_max_value = meta->msize_max_value;
+       ret_reg->smin_value = -MAX_ERRNO;
+       ret_reg->s32_min_value = -MAX_ERRNO;
        __reg_deduce_bounds(ret_reg);
        __reg_bound_offset(ret_reg);
        __update_reg_bounds(ret_reg);
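The __reg_combine_64_into_32() hunk above narrows the signed 32-bit bounds only when both 64-bit endpoints fit in s32; truncating just one endpoint can produce an interval that excludes values the 64-bit range still allows. A standalone illustration in plain userspace C (not verifier code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t smin = -5;
	int64_t smax = INT64_C(0x100000000) + 10;	/* does not fit in s32 */

	/* naive truncation would claim the 32-bit range is [-5, 10] ... */
	printf("naive s32 bounds: [%" PRId32 ", %" PRId32 "]\n",
	       (int32_t)smin, (int32_t)smax);
	/* ... yet 2^31 lies inside [smin, smax] and its low 32 bits are
	 * INT32_MIN, far outside [-5, 10] -- hence the "both bounds fit"
	 * check before narrowing. */
	return 0;
}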
diff --git a/kernel/elfcore.c b/kernel/elfcore.c
deleted file mode 100644 (file)
index 57fb4dc..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/elf.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/binfmts.h>
-#include <linux/elfcore.h>
-
-Elf_Half __weak elf_core_extra_phdrs(void)
-{
-       return 0;
-}
-
-int __weak elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
-{
-       return 1;
-}
-
-int __weak elf_core_write_extra_data(struct coredump_params *cprm)
-{
-       return 1;
-}
-
-size_t __weak elf_core_extra_data_size(void)
-{
-       return 0;
-}
index 048c655..a125ea5 100644 (file)
@@ -1337,9 +1337,9 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
        case BPF_FUNC_snprintf_btf:
                return &bpf_snprintf_btf_proto;
-       case BPF_FUNC_bpf_per_cpu_ptr:
+       case BPF_FUNC_per_cpu_ptr:
                return &bpf_per_cpu_ptr_proto;
-       case BPF_FUNC_bpf_this_cpu_ptr:
+       case BPF_FUNC_this_cpu_ptr:
                return &bpf_this_cpu_ptr_proto;
        default:
                return NULL;
index 7d53c5b..0613418 100644 (file)
@@ -163,7 +163,8 @@ static union trace_eval_map_item *trace_eval_maps;
 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
 
 int tracing_set_tracer(struct trace_array *tr, const char *buf);
-static void ftrace_trace_userstack(struct trace_buffer *buffer,
+static void ftrace_trace_userstack(struct trace_array *tr,
+                                  struct trace_buffer *buffer,
                                   unsigned long flags, int pc);
 
 #define MAX_TRACER_SIZE                100
@@ -2870,7 +2871,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
         * two. They are not that meaningful.
         */
        ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
-       ftrace_trace_userstack(buffer, flags, pc);
+       ftrace_trace_userstack(tr, buffer, flags, pc);
 }
 
 /*
@@ -3056,13 +3057,14 @@ EXPORT_SYMBOL_GPL(trace_dump_stack);
 static DEFINE_PER_CPU(int, user_stack_count);
 
 static void
-ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
+ftrace_trace_userstack(struct trace_array *tr,
+                      struct trace_buffer *buffer, unsigned long flags, int pc)
 {
        struct trace_event_call *call = &event_user_stack;
        struct ring_buffer_event *event;
        struct userstack_entry *entry;
 
-       if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
+       if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
                return;
 
        /*
@@ -3101,7 +3103,8 @@ ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
        preempt_enable();
 }
 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
-static void ftrace_trace_userstack(struct trace_buffer *buffer,
+static void ftrace_trace_userstack(struct trace_array *tr,
+                                  struct trace_buffer *buffer,
                                   unsigned long flags, int pc)
 {
 }
index ce45af5..d415fc7 100644 (file)
@@ -107,7 +107,7 @@ obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o
 # off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS
 # get appended last to CFLAGS and thus override those previous compiler options.
 #
-FPU_CFLAGS := -mhard-float -msse -msse2
+FPU_CFLAGS := -msse -msse2
 ifdef CONFIG_CC_IS_GCC
 # Stack alignment mismatch, proceed with caution.
 # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
@@ -120,6 +120,7 @@ ifdef CONFIG_CC_IS_GCC
 #  -mpreferred-stack-boundary=3 is not between 4 and 12
 #
 # can be triggered. Otherwise gcc doesn't complain.
+FPU_CFLAGS += -mhard-float
 FPU_CFLAGS += $(call cc-option,-msse -mpreferred-stack-boundary=3,-mpreferred-stack-boundary=4)
 endif
 
index 331f426..0b2067b 100644 (file)
@@ -827,7 +827,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL_GPL(replace_page_cache_page);
 
-static noinline int __add_to_page_cache_locked(struct page *page,
+noinline int __add_to_page_cache_locked(struct page *page,
                                        struct address_space *mapping,
                                        pgoff_t offset, gfp_t gfp,
                                        void **shadowp)
index 37f15c3..d029d93 100644 (file)
@@ -1216,6 +1216,7 @@ static void destroy_compound_gigantic_page(struct page *page,
        }
 
        set_compound_order(page, 0);
+       page[1].compound_nr = 0;
        __ClearPageHead(page);
 }
 
index 4c53758..0e3f849 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/srcu.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <linux/cpuhotplug.h>
 
 #include "../slab.h"
 #include "kasan.h"
@@ -43,6 +44,7 @@ struct qlist_head {
        struct qlist_node *head;
        struct qlist_node *tail;
        size_t bytes;
+       bool offline;
 };
 
 #define QLIST_INIT { NULL, NULL, 0 }
@@ -188,6 +190,10 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
        local_irq_save(flags);
 
        q = this_cpu_ptr(&cpu_quarantine);
+       if (q->offline) {
+               local_irq_restore(flags);
+               return;
+       }
        qlist_put(q, &info->quarantine_link, cache->size);
        if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
                qlist_move_all(q, &temp);
@@ -328,3 +334,36 @@ void quarantine_remove_cache(struct kmem_cache *cache)
 
        synchronize_srcu(&remove_cache_srcu);
 }
+
+static int kasan_cpu_online(unsigned int cpu)
+{
+       this_cpu_ptr(&cpu_quarantine)->offline = false;
+       return 0;
+}
+
+static int kasan_cpu_offline(unsigned int cpu)
+{
+       struct qlist_head *q;
+
+       q = this_cpu_ptr(&cpu_quarantine);
+       /* Ensure the ordering between the writing to q->offline and
+        * qlist_free_all. Otherwise, cpu_quarantine may be corrupted
+        * by interrupt.
+        */
+       WRITE_ONCE(q->offline, true);
+       barrier();
+       qlist_free_all(q, NULL);
+       return 0;
+}
+
+static int __init kasan_cpu_quarantine_init(void)
+{
+       int ret = 0;
+
+       ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online",
+                               kasan_cpu_online, kasan_cpu_offline);
+       if (ret < 0)
+               pr_err("kasan cpu quarantine register failed [%d]\n", ret);
+       return ret;
+}
+late_initcall(kasan_cpu_quarantine_init);
index a8d8d48..13f5677 100644 (file)
@@ -1204,8 +1204,7 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
                goto put_pid;
        }
 
-       if (task->mm != current->mm &&
-                       !process_madvise_behavior_valid(behavior)) {
+       if (!process_madvise_behavior_valid(behavior)) {
                ret = -EINVAL;
                goto release_task;
        }
index 7730c8f..d3ea9d0 100644 (file)
@@ -177,6 +177,9 @@ static int br_dev_open(struct net_device *dev)
        br_stp_enable_bridge(br);
        br_multicast_open(br);
 
+       if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
+               br_multicast_join_snoopers(br);
+
        return 0;
 }
 
@@ -197,6 +200,9 @@ static int br_dev_stop(struct net_device *dev)
        br_stp_disable_bridge(br);
        br_multicast_stop(br);
 
+       if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
+               br_multicast_leave_snoopers(br);
+
        netif_stop_queue(dev);
 
        return 0;
index eae898c..54cb82a 100644 (file)
@@ -3286,7 +3286,7 @@ static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
 }
 #endif
 
-static void br_multicast_join_snoopers(struct net_bridge *br)
+void br_multicast_join_snoopers(struct net_bridge *br)
 {
        br_ip4_multicast_join_snoopers(br);
        br_ip6_multicast_join_snoopers(br);
@@ -3317,7 +3317,7 @@ static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
 }
 #endif
 
-static void br_multicast_leave_snoopers(struct net_bridge *br)
+void br_multicast_leave_snoopers(struct net_bridge *br)
 {
        br_ip4_multicast_leave_snoopers(br);
        br_ip6_multicast_leave_snoopers(br);
@@ -3336,9 +3336,6 @@ static void __br_multicast_open(struct net_bridge *br,
 
 void br_multicast_open(struct net_bridge *br)
 {
-       if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
-               br_multicast_join_snoopers(br);
-
        __br_multicast_open(br, &br->ip4_own_query);
 #if IS_ENABLED(CONFIG_IPV6)
        __br_multicast_open(br, &br->ip6_own_query);
@@ -3354,9 +3351,6 @@ void br_multicast_stop(struct net_bridge *br)
        del_timer_sync(&br->ip6_other_query.timer);
        del_timer_sync(&br->ip6_own_query.timer);
 #endif
-
-       if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
-               br_multicast_leave_snoopers(br);
 }
 
 void br_multicast_dev_del(struct net_bridge *br)
@@ -3487,6 +3481,7 @@ static void br_multicast_start_querier(struct net_bridge *br,
 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
 {
        struct net_bridge_port *port;
+       bool change_snoopers = false;
 
        spin_lock_bh(&br->multicast_lock);
        if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
@@ -3495,7 +3490,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
        br_mc_disabled_update(br->dev, val);
        br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
        if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
-               br_multicast_leave_snoopers(br);
+               change_snoopers = true;
                goto unlock;
        }
 
@@ -3506,9 +3501,30 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
        list_for_each_entry(port, &br->port_list, list)
                __br_multicast_enable_port(port);
 
+       change_snoopers = true;
+
 unlock:
        spin_unlock_bh(&br->multicast_lock);
 
+       /* br_multicast_join_snoopers has the potential to cause
+        * an MLD Report/Leave to be delivered to br_multicast_rcv,
+        * which would in turn call br_multicast_add_group, which would
+        * attempt to acquire multicast_lock. This function should be
+        * called after the lock has been released to avoid deadlocks on
+        * multicast_lock.
+        *
+        * br_multicast_leave_snoopers does not have the problem since
+        * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
+        * returns without calling br_multicast_ipv4/6_rcv if it's not
+        * enabled. Moved both functions out just for symmetry.
+        */
+       if (change_snoopers) {
+               if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
+                       br_multicast_join_snoopers(br);
+               else
+                       br_multicast_leave_snoopers(br);
+       }
+
        return 0;
 }
 
index 345118e..8424464 100644 (file)
@@ -792,6 +792,8 @@ void br_multicast_del_port(struct net_bridge_port *port);
 void br_multicast_enable_port(struct net_bridge_port *port);
 void br_multicast_disable_port(struct net_bridge_port *port);
 void br_multicast_init(struct net_bridge *br);
+void br_multicast_join_snoopers(struct net_bridge *br);
+void br_multicast_leave_snoopers(struct net_bridge *br);
 void br_multicast_open(struct net_bridge *br);
 void br_multicast_stop(struct net_bridge *br);
 void br_multicast_dev_del(struct net_bridge *br);
@@ -969,6 +971,14 @@ static inline void br_multicast_init(struct net_bridge *br)
 {
 }
 
+static inline void br_multicast_join_snoopers(struct net_bridge *br)
+{
+}
+
+static inline void br_multicast_leave_snoopers(struct net_bridge *br)
+{
+}
+
 static inline void br_multicast_open(struct net_bridge *br)
 {
 }
index 3e493eb..08c7741 100644 (file)
@@ -266,8 +266,10 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
                }
 
                masterv = br_vlan_get_master(br, v->vid, extack);
-               if (!masterv)
+               if (!masterv) {
+                       err = -ENOMEM;
                        goto out_filt;
+               }
                v->brvlan = masterv;
                if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
                        v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
index d78ab13..26bdc3c 100644 (file)
@@ -1157,6 +1157,9 @@ static int isotp_setsockopt(struct socket *sock, int level, int optname,
        if (level != SOL_CAN_ISOTP)
                return -EINVAL;
 
+       if (so->bound)
+               return -EISCONN;
+
        switch (optname) {
        case CAN_ISOTP_OPTS:
                if (optlen != sizeof(struct can_isotp_options))
index 8588ade..38412e7 100644 (file)
@@ -8917,6 +8917,17 @@ static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
        return dev->xdp_state[mode].prog;
 }
 
+static u8 dev_xdp_prog_count(struct net_device *dev)
+{
+       u8 count = 0;
+       int i;
+
+       for (i = 0; i < __MAX_XDP_MODE; i++)
+               if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
+                       count++;
+       return count;
+}
+
 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
 {
        struct bpf_prog *prog = dev_xdp_prog(dev, mode);
@@ -9007,6 +9018,7 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
                          struct bpf_xdp_link *link, struct bpf_prog *new_prog,
                          struct bpf_prog *old_prog, u32 flags)
 {
+       unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
        struct bpf_prog *cur_prog;
        enum bpf_xdp_mode mode;
        bpf_op_t bpf_op;
@@ -9022,11 +9034,17 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
                NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
                return -EINVAL;
        }
-       /* just one XDP mode bit should be set, zero defaults to SKB mode */
-       if (hweight32(flags & XDP_FLAGS_MODES) > 1) {
+       /* just one XDP mode bit should be set, zero defaults to drv/skb mode */
+       if (num_modes > 1) {
                NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
                return -EINVAL;
        }
+       /* avoid ambiguity if offload + drv/skb mode progs are both loaded */
+       if (!num_modes && dev_xdp_prog_count(dev) > 1) {
+               NL_SET_ERR_MSG(extack,
+                              "More than one program loaded, unset mode is ambiguous");
+               return -EINVAL;
+       }
        /* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
        if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
                NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");
index d4474c8..715b67f 100644 (file)
@@ -381,10 +381,8 @@ static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
 
        list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
                if (this->release == release &&
-                   this->indr.cb_priv == cb_priv) {
+                   this->indr.cb_priv == cb_priv)
                        list_move(&this->indr.list, cleanup_list);
-                       return;
-               }
        }
 }
 
index 7d34382..2f7940b 100644 (file)
@@ -39,12 +39,11 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
 {
        int ret;
 
-       /* Preempt disable is needed to protect per-cpu redirect_info between
-        * BPF prog and skb_do_redirect(). The call_rcu in bpf_prog_put() and
-        * access to maps strictly require a rcu_read_lock() for protection,
-        * mixing with BH RCU lock doesn't work.
+       /* Migration disable and BH disable are needed to protect per-cpu
+        * redirect_info between BPF prog and skb_do_redirect().
         */
-       preempt_disable();
+       migrate_disable();
+       local_bh_disable();
        bpf_compute_data_pointers(skb);
        ret = bpf_prog_run_save_cb(lwt->prog, skb);
 
@@ -78,7 +77,8 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
                break;
        }
 
-       preempt_enable();
+       local_bh_enable();
+       migrate_enable();
 
        return ret;
 }
index 48aba93..d900ceb 100644 (file)
@@ -335,11 +335,10 @@ EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
  * scenarios (e.g. queue full), it is possible to return the xdp_frame
  * while still leveraging this protection.  The @napi_direct boolean
  * is used for those calls sites.  Thus, allowing for faster recycling
- * of xdp_frames/pages in those cases. This path is never used by the
- * MEM_TYPE_XSK_BUFF_POOL memory type, so it's explicitly not part of
- * the switch-statement.
+ * of xdp_frames/pages in those cases.
  */
-static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
+static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
+                        struct xdp_buff *xdp)
 {
        struct xdp_mem_allocator *xa;
        struct page *page;
@@ -361,6 +360,10 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
                page = virt_to_page(data); /* Assumes order0 page*/
                put_page(page);
                break;
+       case MEM_TYPE_XSK_BUFF_POOL:
+               /* NB! Only valid from an xdp_buff! */
+               xsk_buff_free(xdp);
+               break;
        default:
                /* Not possible, checked in xdp_rxq_info_reg_mem_model() */
                WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
@@ -370,19 +373,19 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
 
 void xdp_return_frame(struct xdp_frame *xdpf)
 {
-       __xdp_return(xdpf->data, &xdpf->mem, false);
+       __xdp_return(xdpf->data, &xdpf->mem, false, NULL);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame);
 
 void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
 {
-       __xdp_return(xdpf->data, &xdpf->mem, true);
+       __xdp_return(xdpf->data, &xdpf->mem, true, NULL);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
 
 void xdp_return_buff(struct xdp_buff *xdp)
 {
-       __xdp_return(xdp->data, &xdp->rxq->mem, true);
+       __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
 }
 
 /* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
@@ -400,18 +403,6 @@ void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
 }
 EXPORT_SYMBOL_GPL(__xdp_release_frame);
 
-bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
-                            struct netdev_bpf *bpf)
-{
-       if (info->prog && (bpf->flags ^ info->flags) & XDP_FLAGS_MODES) {
-               NL_SET_ERR_MSG(bpf->extack,
-                              "program loaded with different flags");
-               return false;
-       }
-       return true;
-}
-EXPORT_SYMBOL_GPL(xdp_attachment_flags_ok);
-
 void xdp_attachment_setup(struct xdp_attachment_info *info,
                          struct netdev_bpf *bpf)
 {
index 1fb3603..0515d66 100644 (file)
@@ -628,6 +628,8 @@ int ethnl_parse_bitset(unsigned long *val, unsigned long *mask,
                        return ret;
 
                change_bits = nla_get_u32(tb[ETHTOOL_A_BITSET_SIZE]);
+               if (change_bits > nbits)
+                       change_bits = nbits;
                bitmap_from_arr32(val, nla_data(tb[ETHTOOL_A_BITSET_VALUE]),
                                  change_bits);
                if (change_bits < nbits)
index b87140a..cdf6ec5 100644 (file)
@@ -825,7 +825,7 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
        if (has_gw && has_via) {
                NL_SET_ERR_MSG(extack,
                               "Nexthop configuration can not contain both GATEWAY and VIA");
-               goto errout;
+               return -EINVAL;
        }
 
        return 0;
index d1e04d2..563b62b 100644 (file)
@@ -203,7 +203,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 
        local_bh_disable();
        addend = xt_write_recseq_begin();
-       private = READ_ONCE(table->private); /* Address dependency. */
+       private = rcu_access_pointer(table->private);
        cpu     = smp_processor_id();
        table_base = private->entries;
        jumpstack  = (struct arpt_entry **)private->jumpstack[cpu];
@@ -649,7 +649,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 {
        unsigned int countersize;
        struct xt_counters *counters;
-       const struct xt_table_info *private = table->private;
+       const struct xt_table_info *private = xt_table_get_private_protected(table);
 
        /* We need atomic snapshot of counters: rest doesn't change
         * (other than comefrom, which userspace doesn't care
@@ -673,7 +673,7 @@ static int copy_entries_to_user(unsigned int total_size,
        unsigned int off, num;
        const struct arpt_entry *e;
        struct xt_counters *counters;
-       struct xt_table_info *private = table->private;
+       struct xt_table_info *private = xt_table_get_private_protected(table);
        int ret = 0;
        void *loc_cpu_entry;
 
@@ -807,7 +807,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
        t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
        if (!IS_ERR(t)) {
                struct arpt_getinfo info;
-               const struct xt_table_info *private = t->private;
+               const struct xt_table_info *private = xt_table_get_private_protected(t);
 #ifdef CONFIG_COMPAT
                struct xt_table_info tmp;
 
@@ -860,7 +860,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
 
        t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = t->private;
+               const struct xt_table_info *private = xt_table_get_private_protected(t);
 
                if (get.size == private->size)
                        ret = copy_entries_to_user(private->size,
@@ -1017,7 +1017,7 @@ static int do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
        }
 
        local_bh_disable();
-       private = t->private;
+       private = xt_table_get_private_protected(t);
        if (private->number != tmp.num_counters) {
                ret = -EINVAL;
                goto unlock_up_free;
@@ -1330,7 +1330,7 @@ static int compat_copy_entries_to_user(unsigned int total_size,
                                       void __user *userptr)
 {
        struct xt_counters *counters;
-       const struct xt_table_info *private = table->private;
+       const struct xt_table_info *private = xt_table_get_private_protected(table);
        void __user *pos;
        unsigned int size;
        int ret = 0;
index f15bc21..6e2851f 100644 (file)
@@ -258,7 +258,7 @@ ipt_do_table(struct sk_buff *skb,
        WARN_ON(!(table->valid_hooks & (1 << hook)));
        local_bh_disable();
        addend = xt_write_recseq_begin();
-       private = READ_ONCE(table->private); /* Address dependency. */
+       private = rcu_access_pointer(table->private);
        cpu        = smp_processor_id();
        table_base = private->entries;
        jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
@@ -791,7 +791,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 {
        unsigned int countersize;
        struct xt_counters *counters;
-       const struct xt_table_info *private = table->private;
+       const struct xt_table_info *private = xt_table_get_private_protected(table);
 
        /* We need atomic snapshot of counters: rest doesn't change
           (other than comefrom, which userspace doesn't care
@@ -815,7 +815,7 @@ copy_entries_to_user(unsigned int total_size,
        unsigned int off, num;
        const struct ipt_entry *e;
        struct xt_counters *counters;
-       const struct xt_table_info *private = table->private;
+       const struct xt_table_info *private = xt_table_get_private_protected(table);
        int ret = 0;
        const void *loc_cpu_entry;
 
@@ -964,7 +964,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
        t = xt_request_find_table_lock(net, AF_INET, name);
        if (!IS_ERR(t)) {
                struct ipt_getinfo info;
-               const struct xt_table_info *private = t->private;
+               const struct xt_table_info *private = xt_table_get_private_protected(t);
 #ifdef CONFIG_COMPAT
                struct xt_table_info tmp;
 
@@ -1018,7 +1018,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
 
        t = xt_find_table_lock(net, AF_INET, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = t->private;
+               const struct xt_table_info *private = xt_table_get_private_protected(t);
                if (get.size == private->size)
                        ret = copy_entries_to_user(private->size,
                                                   t, uptr->entrytable);
@@ -1173,7 +1173,7 @@ do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
        }
 
        local_bh_disable();
-       private = t->private;
+       private = xt_table_get_private_protected(t);
        if (private->number != tmp.num_counters) {
                ret = -EINVAL;
                goto unlock_up_free;
@@ -1543,7 +1543,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
                            void __user *userptr)
 {
        struct xt_counters *counters;
-       const struct xt_table_info *private = table->private;
+       const struct xt_table_info *private = xt_table_get_private_protected(table);
        void __user *pos;
        unsigned int size;
        int ret = 0;
index 389d1b3..ef4bdb0 100644 (file)
@@ -510,7 +510,6 @@ static void tcp_init_buffer_space(struct sock *sk)
        if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
                tcp_sndbuf_expand(sk);
 
-       tp->rcvq_space.space = min_t(u32, tp->rcv_wnd, TCP_INIT_CWND * tp->advmss);
        tcp_mstamp_refresh(tp);
        tp->rcvq_space.time = tp->tcp_mstamp;
        tp->rcvq_space.seq = tp->copied_seq;
@@ -534,6 +533,8 @@ static void tcp_init_buffer_space(struct sock *sk)
 
        tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
        tp->snd_cwnd_stamp = tcp_jiffies32;
+       tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd,
+                                   (u32)TCP_INIT_CWND * tp->advmss);
 }
 
 /* 4. Recalculate window clamp after socket hit its memory bounds. */
index 8391aa2..595dcc3 100644 (file)
@@ -984,7 +984,8 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 
                tos = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
-                               tcp_rsk(req)->syn_tos & ~INET_ECN_MASK :
+                               (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
+                               (inet_sk(sk)->tos & INET_ECN_MASK) :
                                inet_sk(sk)->tos;
 
                if (!INET_ECN_is_capable(tos) &&
@@ -1541,7 +1542,9 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
        newinet->inet_id = prandom_u32();
 
-       /* Set ToS of the new socket based upon the value of incoming SYN. */
+       /* Set ToS of the new socket based upon the value of incoming SYN.
+        * ECT bits are set later in tcp_init_transfer().
+        */
        if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
                newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
 
index bf48cd7..9901176 100644 (file)
@@ -1880,7 +1880,8 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
         * window, and remember whether we were cwnd-limited then.
         */
        if (!before(tp->snd_una, tp->max_packets_seq) ||
-           tp->packets_out > tp->max_packets_out) {
+           tp->packets_out > tp->max_packets_out ||
+           is_cwnd_limited) {
                tp->max_packets_out = tp->packets_out;
                tp->max_packets_seq = tp->snd_nxt;
                tp->is_cwnd_limited = is_cwnd_limited;
@@ -2702,6 +2703,10 @@ repair:
        else
                tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
 
+       is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
+       if (likely(sent_pkts || is_cwnd_limited))
+               tcp_cwnd_validate(sk, is_cwnd_limited);
+
        if (likely(sent_pkts)) {
                if (tcp_in_cwnd_reduction(sk))
                        tp->prr_out += sent_pkts;
@@ -2709,8 +2714,6 @@ repair:
                /* Send one loss probe per tail loss episode. */
                if (push_one != 2)
                        tcp_schedule_loss_probe(sk, false);
-               is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
-               tcp_cwnd_validate(sk, is_cwnd_limited);
                return false;
        }
        return !tp->packets_out && !tcp_write_queue_empty(sk);
index 09f0a23..9eeebd4 100644 (file)
@@ -2173,7 +2173,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                __skb_pull(skb, skb_transport_offset(skb));
                ret = udp_queue_rcv_one_skb(sk, skb);
                if (ret > 0)
-                       ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret);
+                       ip_protocol_deliver_rcu(dev_net(skb->dev), skb, ret);
        }
        return 0;
 }
index 2e2119b..c4f532f 100644 (file)
@@ -280,7 +280,7 @@ ip6t_do_table(struct sk_buff *skb,
 
        local_bh_disable();
        addend = xt_write_recseq_begin();
-       private = READ_ONCE(table->private); /* Address dependency. */
+       private = rcu_access_pointer(table->private);
        cpu        = smp_processor_id();
        table_base = private->entries;
        jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
@@ -807,7 +807,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 {
        unsigned int countersize;
        struct xt_counters *counters;
-       const struct xt_table_info *private = table->private;
+       const struct xt_table_info *private = xt_table_get_private_protected(table);
 
        /* We need atomic snapshot of counters: rest doesn't change
           (other than comefrom, which userspace doesn't care
@@ -831,7 +831,7 @@ copy_entries_to_user(unsigned int total_size,
        unsigned int off, num;
        const struct ip6t_entry *e;
        struct xt_counters *counters;
-       const struct xt_table_info *private = table->private;
+       const struct xt_table_info *private = xt_table_get_private_protected(table);
        int ret = 0;
        const void *loc_cpu_entry;
 
@@ -980,7 +980,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
        t = xt_request_find_table_lock(net, AF_INET6, name);
        if (!IS_ERR(t)) {
                struct ip6t_getinfo info;
-               const struct xt_table_info *private = t->private;
+               const struct xt_table_info *private = xt_table_get_private_protected(t);
 #ifdef CONFIG_COMPAT
                struct xt_table_info tmp;
 
@@ -1035,7 +1035,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
 
        t = xt_find_table_lock(net, AF_INET6, get.name);
        if (!IS_ERR(t)) {
-               struct xt_table_info *private = t->private;
+               struct xt_table_info *private = xt_table_get_private_protected(t);
                if (get.size == private->size)
                        ret = copy_entries_to_user(private->size,
                                                   t, uptr->entrytable);
@@ -1189,7 +1189,7 @@ do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
        }
 
        local_bh_disable();
-       private = t->private;
+       private = xt_table_get_private_protected(t);
        if (private->number != tmp.num_counters) {
                ret = -EINVAL;
                goto unlock_up_free;
@@ -1552,7 +1552,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
                            void __user *userptr)
 {
        struct xt_counters *counters;
-       const struct xt_table_info *private = table->private;
+       const struct xt_table_info *private = xt_table_get_private_protected(table);
        void __user *pos;
        unsigned int size;
        int ret = 0;
index 992cbf3..991dc36 100644 (file)
@@ -528,7 +528,8 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                        fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 
                tclass = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
-                               tcp_rsk(req)->syn_tos & ~INET_ECN_MASK :
+                               (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
+                               (np->tclass & INET_ECN_MASK) :
                                np->tclass;
 
                if (!INET_ECN_is_capable(tclass) &&
@@ -1320,7 +1321,9 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
        if (np->repflow)
                newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
 
-       /* Set ToS of the new socket based upon the value of incoming SYN. */
+       /* Set ToS of the new socket based upon the value of incoming SYN.
+        * ECT bits are set later in tcp_init_transfer().
+        */
        if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
                newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
 
index 1be7759..44154cc 100644 (file)
@@ -948,6 +948,8 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
                return ret;
        }
 
+       set_bit(SDATA_STATE_RUNNING, &sdata->state);
+
        ret = ieee80211_check_queues(sdata, NL80211_IFTYPE_MONITOR);
        if (ret) {
                kfree(sdata);
index 48f31ac..620ecf9 100644 (file)
@@ -60,6 +60,7 @@ static struct mesh_table *mesh_table_alloc(void)
        atomic_set(&newtbl->entries,  0);
        spin_lock_init(&newtbl->gates_lock);
        spin_lock_init(&newtbl->walk_lock);
+       rhashtable_init(&newtbl->rhead, &mesh_rht_params);
 
        return newtbl;
 }
@@ -773,9 +774,6 @@ int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
                goto free_path;
        }
 
-       rhashtable_init(&tbl_path->rhead, &mesh_rht_params);
-       rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params);
-
        sdata->u.mesh.mesh_paths = tbl_path;
        sdata->u.mesh.mpp_paths = tbl_mpp;
 
index 4934206..94e624e 100644 (file)
@@ -3455,7 +3455,7 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
 
        *chandef = he_chandef;
 
-       return false;
+       return true;
 }
 
 bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper,
index 84d1194..b921cbd 100644 (file)
@@ -67,6 +67,7 @@ void mptcp_seq_show(struct seq_file *seq)
                for (i = 0; mptcp_snmp_list[i].name; i++)
                        seq_puts(seq, " 0");
 
+               seq_putc(seq, '\n');
                return;
        }
 
index 23abf15..9a08076 100644 (file)
@@ -1723,6 +1723,10 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net,
        }
 
        nla_strlcpy(ifname, attr, IFNAMSIZ);
+       /* nf_tables_netdev_event() is called under rtnl_mutex, this is
+        * indirectly serializing all the other holders of the commit_mutex with
+        * the rtnl_mutex.
+        */
        dev = __dev_get_by_name(net, ifname);
        if (!dev) {
                err = -ENOENT;
@@ -3719,7 +3723,7 @@ cont:
        return 0;
 }
 
-static int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result)
+int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result)
 {
        u64 ms = be64_to_cpu(nla_get_be64(nla));
        u64 max = (u64)(~((u64)0));
@@ -3733,7 +3737,7 @@ static int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result)
        return 0;
 }
 
-static __be64 nf_jiffies64_to_msecs(u64 input)
+__be64 nf_jiffies64_to_msecs(u64 input)
 {
        return cpu_to_be64(jiffies64_to_msecs(input));
 }
index 322bd67..a1b0aac 100644 (file)
@@ -177,8 +177,6 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
        }
 #endif
        case NFT_CT_ID:
-               if (!nf_ct_is_confirmed(ct))
-                       goto err;
                *dest = nf_ct_get_id(ct);
                return;
        default:
index 64ca13a..9af4f93 100644 (file)
@@ -157,8 +157,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
        if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
                if (!(set->flags & NFT_SET_TIMEOUT))
                        return -EINVAL;
-               timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
-                                               tb[NFTA_DYNSET_TIMEOUT])));
+
+               err = nf_msecs_to_jiffies64(tb[NFTA_DYNSET_TIMEOUT], &timeout);
+               if (err)
+                       return err;
        }
 
        priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
@@ -267,7 +269,7 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
        if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
                goto nla_put_failure;
        if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
-                        cpu_to_be64(jiffies_to_msecs(priv->timeout)),
+                        nf_jiffies64_to_msecs(priv->timeout),
                         NFTA_DYNSET_PAD))
                goto nla_put_failure;
        if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
index af22dbe..acce622 100644 (file)
@@ -1349,6 +1349,14 @@ struct xt_counters *xt_counters_alloc(unsigned int counters)
 }
 EXPORT_SYMBOL(xt_counters_alloc);
 
+struct xt_table_info
+*xt_table_get_private_protected(const struct xt_table *table)
+{
+       return rcu_dereference_protected(table->private,
+                                        mutex_is_locked(&xt[table->af].mutex));
+}
+EXPORT_SYMBOL(xt_table_get_private_protected);
+
 struct xt_table_info *
 xt_replace_table(struct xt_table *table,
              unsigned int num_counters,
@@ -1356,7 +1364,6 @@ xt_replace_table(struct xt_table *table,
              int *error)
 {
        struct xt_table_info *private;
-       unsigned int cpu;
        int ret;
 
        ret = xt_jumpstack_alloc(newinfo);
@@ -1366,47 +1373,20 @@ xt_replace_table(struct xt_table *table,
        }
 
        /* Do the substitution. */
-       local_bh_disable();
-       private = table->private;
+       private = xt_table_get_private_protected(table);
 
        /* Check inside lock: is the old number correct? */
        if (num_counters != private->number) {
                pr_debug("num_counters != table->private->number (%u/%u)\n",
                         num_counters, private->number);
-               local_bh_enable();
                *error = -EAGAIN;
                return NULL;
        }
 
        newinfo->initial_entries = private->initial_entries;
-       /*
-        * Ensure contents of newinfo are visible before assigning to
-        * private.
-        */
-       smp_wmb();
-       table->private = newinfo;
-
-       /* make sure all cpus see new ->private value */
-       smp_wmb();
 
-       /*
-        * Even though table entries have now been swapped, other CPU's
-        * may still be using the old entries...
-        */
-       local_bh_enable();
-
-       /* ... so wait for even xt_recseq on all cpus */
-       for_each_possible_cpu(cpu) {
-               seqcount_t *s = &per_cpu(xt_recseq, cpu);
-               u32 seq = raw_read_seqcount(s);
-
-               if (seq & 1) {
-                       do {
-                               cond_resched();
-                               cpu_relax();
-                       } while (seq == raw_read_seqcount(s));
-               }
-       }
+       rcu_assign_pointer(table->private, newinfo);
+       synchronize_rcu();
 
        audit_log_nfcfg(table->name, table->af, private->number,
                        !private->number ? AUDIT_XT_OP_REGISTER :
@@ -1442,12 +1422,12 @@ struct xt_table *xt_register_table(struct net *net,
        }
 
        /* Simplifies replace_table code. */
-       table->private = bootstrap;
+       rcu_assign_pointer(table->private, bootstrap);
 
        if (!xt_replace_table(table, 0, newinfo, &ret))
                goto unlock;
 
-       private = table->private;
+       private = xt_table_get_private_protected(table);
        pr_debug("table->private->number = %u\n", private->number);
 
        /* save number of initial entries */
@@ -1470,7 +1450,8 @@ void *xt_unregister_table(struct xt_table *table)
        struct xt_table_info *private;
 
        mutex_lock(&xt[table->af].mutex);
-       private = table->private;
+       private = xt_table_get_private_protected(table);
+       RCU_INIT_POINTER(table->private, NULL);
        list_del(&table->list);
        mutex_unlock(&xt[table->af].mutex);
        audit_log_nfcfg(table->name, table->af, private->number,
index ec0689d..4c5c233 100644 (file)
@@ -2531,7 +2531,7 @@ static int validate_and_copy_dec_ttl(struct net *net,
 
        action_start = add_nested_action_start(sfa, OVS_DEC_TTL_ATTR_ACTION, log);
        if (action_start < 0)
-               return start;
+               return action_start;
 
        err = __ovs_nla_copy_actions(net, actions, key, sfa, eth_type,
                                     vlan_tci, mpls_label_count, log);
index fed18fd..1319986 100644 (file)
@@ -2424,8 +2424,8 @@ static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
                        return err;
        }
        if (lse_mask->mpls_label) {
-               err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
-                                lse_key->mpls_label);
+               err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
+                                 lse_key->mpls_label);
                if (err)
                        return err;
        }
index 4dda155..949163f 100644 (file)
@@ -401,6 +401,7 @@ static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
 
        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
+       timer_setup(&q->adapt_timer, fq_pie_timer, 0);
 
        if (opt) {
                err = fq_pie_change(sch, opt, extack);
@@ -426,7 +427,6 @@ static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
                pie_vars_init(&flow->vars);
        }
 
-       timer_setup(&q->adapt_timer, fq_pie_timer, 0);
        mod_timer(&q->adapt_timer, jiffies + HZ / 2);
 
        return 0;
index c95d037..83978d5 100644 (file)
@@ -2181,9 +2181,11 @@ void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
                                                        &xmitq);
                        else if (prop == TIPC_NLA_PROP_MTU)
                                tipc_link_set_mtu(e->link, b->mtu);
+
+                       /* Update MTU for node link entry */
+                       e->mtu = tipc_link_mss(e->link);
                }
-               /* Update MTU for node link entry */
-               e->mtu = tipc_link_mss(e->link);
+
                tipc_node_write_unlock(n);
                tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
        }
index a77174b..f67ddf2 100644 (file)
@@ -12634,7 +12634,7 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
        struct net_device *dev = info->user_ptr[1];
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct nlattr *tb[NUM_NL80211_REKEY_DATA];
-       struct cfg80211_gtk_rekey_data rekey_data;
+       struct cfg80211_gtk_rekey_data rekey_data = {};
        int err;
 
        if (!info->attrs[NL80211_ATTR_REKEY_DATA])
index b7b039b..6250447 100644 (file)
@@ -211,6 +211,14 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
        return 0;
 }
 
+static bool xsk_tx_writeable(struct xdp_sock *xs)
+{
+       if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
+               return false;
+
+       return true;
+}
+
 static bool xsk_is_bound(struct xdp_sock *xs)
 {
        if (READ_ONCE(xs->state) == XSK_BOUND) {
@@ -296,7 +304,8 @@ void xsk_tx_release(struct xsk_buff_pool *pool)
        rcu_read_lock();
        list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
                __xskq_cons_release(xs->tx);
-               xs->sk.sk_write_space(&xs->sk);
+               if (xsk_tx_writeable(xs))
+                       xs->sk.sk_write_space(&xs->sk);
        }
        rcu_read_unlock();
 }
@@ -436,7 +445,8 @@ static int xsk_generic_xmit(struct sock *sk)
 
 out:
        if (sent_frame)
-               sk->sk_write_space(sk);
+               if (xsk_tx_writeable(xs))
+                       sk->sk_write_space(sk);
 
        mutex_unlock(&xs->mutex);
        return err;
@@ -471,11 +481,13 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 static __poll_t xsk_poll(struct file *file, struct socket *sock,
                             struct poll_table_struct *wait)
 {
-       __poll_t mask = datagram_poll(file, sock, wait);
+       __poll_t mask = 0;
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        struct xsk_buff_pool *pool;
 
+       sock_poll_wait(file, sock, wait);
+
        if (unlikely(!xsk_is_bound(xs)))
                return mask;
 
@@ -491,7 +503,7 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
 
        if (xs->rx && !xskq_prod_is_empty(xs->rx))
                mask |= EPOLLIN | EPOLLRDNORM;
-       if (xs->tx && !xskq_cons_is_full(xs->tx))
+       if (xs->tx && xsk_tx_writeable(xs))
                mask |= EPOLLOUT | EPOLLWRNORM;
 
        return mask;
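xsk_tx_writeable() above adds hysteresis to the TX wakeups: the socket reports writability (and sk_write_space() fires) only while no more than half of the TX ring is outstanding. A small userspace illustration of the counter math, assuming free-running producer/consumer indices:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t present_entries(uint32_t producer, uint32_t consumer)
{
	return producer - consumer;	/* wrap-safe for free-running counters */
}

static bool tx_writeable(uint32_t producer, uint32_t consumer, uint32_t nentries)
{
	return present_entries(producer, consumer) <= nentries / 2;
}

int main(void)
{
	printf("%d\n", tx_writeable(300, 200, 256));	/* 100 of 256 used -> 1 */
	printf("%d\n", tx_writeable(400, 200, 256));	/* 200 of 256 used -> 0 */
	return 0;
}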
index 9287edd..d5adeee 100644 (file)
@@ -175,6 +175,7 @@ static int __xp_assign_dev(struct xsk_buff_pool *pool,
 
        if (!pool->dma_pages) {
                WARN(1, "Driver did not DMA map zero-copy buffers");
+               err = -EINVAL;
                goto err_unreg_xsk;
        }
        pool->umem->zc = true;
index cdb9cf3..9e71b9f 100644 (file)
@@ -264,6 +264,12 @@ static inline bool xskq_cons_is_full(struct xsk_queue *q)
                q->nentries;
 }
 
+static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
+{
+       /* No barriers needed since data is not accessed */
+       return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
+}
+
 /* Functions for producers */
 
 static inline bool xskq_prod_is_full(struct xsk_queue *q)
index e28f0c9..d8e8a11 100644 (file)
@@ -234,6 +234,7 @@ static int xfrm_xlate64_attr(struct sk_buff *dst, const struct nlattr *src)
        case XFRMA_PAD:
                /* Ignore */
                return 0;
+       case XFRMA_UNSPEC:
        case XFRMA_ALG_AUTH:
        case XFRMA_ALG_CRYPT:
        case XFRMA_ALG_COMP:
@@ -387,7 +388,7 @@ static int xfrm_attr_cpy32(void *dst, size_t *pos, const struct nlattr *src,
 
        memcpy(nla, src, nla_attr_size(copy_len));
        nla->nla_len = nla_attr_size(payload);
-       *pos += nla_attr_size(payload);
+       *pos += nla_attr_size(copy_len);
        nlmsg->nlmsg_len += nla->nla_len;
 
        memset(dst + *pos, 0, payload - copy_len);
@@ -563,7 +564,7 @@ static struct nlmsghdr *xfrm_user_rcv_msg_compat(const struct nlmsghdr *h32,
                return NULL;
 
        len += NLMSG_HDRLEN;
-       h64 = kvmalloc(len, GFP_KERNEL | __GFP_ZERO);
+       h64 = kvmalloc(len, GFP_KERNEL);
        if (!h64)
                return ERR_PTR(-ENOMEM);
 
index a77da7a..2f15178 100644 (file)
@@ -2382,8 +2382,10 @@ int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, int optlen)
        if (in_compat_syscall()) {
                struct xfrm_translator *xtr = xfrm_get_translator();
 
-               if (!xtr)
+               if (!xtr) {
+                       kfree(data);
                        return -EOPNOTSUPP;
+               }
 
                err = xtr->xlate_user_policy_sockptr(&data, optlen);
                xfrm_put_translator(xtr);
index df7d8ec..477e55d 100644 (file)
@@ -89,9 +89,9 @@ libbpf_print_none(__maybe_unused enum libbpf_print_level level,
 
 int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type)
 {
-       char buf[4096];
-       struct pid_iter_bpf *skel;
        struct pid_iter_entry *e;
+       char buf[4096 / sizeof(*e) * sizeof(*e)];
+       struct pid_iter_bpf *skel;
        int err, ret, fd = -1, i;
        libbpf_print_fn_t default_print;
 
index e6ceac3..556216d 100644 (file)
@@ -3897,8 +3897,8 @@ union bpf_attr {
        FN(seq_printf_btf),             \
        FN(skb_cgroup_classid),         \
        FN(redirect_neigh),             \
-       FN(bpf_per_cpu_ptr),            \
-       FN(bpf_this_cpu_ptr),           \
+       FN(per_cpu_ptr),                \
+       FN(this_cpu_ptr),               \
        FN(redirect_peer),              \
        /* */
 
index 5c6522c..98537ff 100644 (file)
@@ -278,7 +278,7 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
                err = ringbuf_process_ring(ring);
                if (err < 0)
                        return err;
-               res += cnt;
+               res += err;
        }
        return cnt < 0 ? -errno : res;
 }
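With the accumulation fix above, ring_buffer__poll() returns the total number of records handled across all registered rings (or a negative error), in line with ring_buffer__consume(). A hedged usage sketch against the public libbpf API; the map fd and where it comes from are assumptions:

#include <bpf/libbpf.h>
#include <stdio.h>

static int handle_event(void *ctx, void *data, size_t size)
{
	(void)ctx; (void)data;
	printf("sample of %zu bytes\n", size);
	return 0;
}

int consume_events(int ringbuf_map_fd)
{
	struct ring_buffer *rb;
	int n;

	rb = ring_buffer__new(ringbuf_map_fd, handle_event, NULL, NULL);
	if (!rb)
		return -1;

	n = ring_buffer__poll(rb, 100 /* ms */);	/* records handled, or <0 */
	if (n >= 0)
		printf("consumed %d records\n", n);

	ring_buffer__free(rb);
	return n;
}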
index cb16d2a..54188ee 100755 (executable)
@@ -2040,7 +2040,7 @@ sub reboot_to {
 
     if ($reboot_type eq "grub") {
        run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'";
-    } elsif ($reboot_type eq "grub2") {
+    } elsif (($reboot_type eq "grub2") or ($reboot_type eq "grub2bls")) {
        run_ssh "$grub_reboot $grub_number";
     } elsif ($reboot_type eq "syslinux") {
        run_ssh "$syslinux --once \\\"$syslinux_label\\\" $syslinux_path";
index 5241405..5861446 100644 (file)
@@ -456,10 +456,10 @@ static struct bpf_align_test tests[] = {
                         */
                        {7, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
                        /* Checked s>=0 */
-                       {9, "R5=inv(id=0,umin_value=2,umax_value=9223372034707292158,var_off=(0x2; 0x7fffffff7ffffffc)"},
+                       {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
                        /* packet pointer + nonnegative (4n+2) */
-                       {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372034707292158,var_off=(0x2; 0x7fffffff7ffffffc)"},
-                       {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372034707292158,var_off=(0x2; 0x7fffffff7ffffffc)"},
+                       {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
+                       {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
                        /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
                         * We checked the bounds, but it might have been able
                         * to overflow if the packet pointer started in the
@@ -467,7 +467,7 @@ static struct bpf_align_test tests[] = {
                         * So we did not get a 'range' on R6, and the access
                         * attempt will fail.
                         */
-                       {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372034707292158,var_off=(0x2; 0x7fffffff7ffffffc)"},
+                       {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
                }
        },
        {
index c165054..fddbc5d 100644 (file)
@@ -217,9 +217,15 @@ void test_ringbuf(void)
        if (CHECK(err, "join_bg", "err %d\n", err))
                goto cleanup;
 
-       if (CHECK(bg_ret != 1, "bg_ret", "epoll_wait result: %ld", bg_ret))
+       if (CHECK(bg_ret <= 0, "bg_ret", "epoll_wait result: %ld", bg_ret))
                goto cleanup;
 
+       /* due to timing variations, there could still be non-notified
+        * samples, so consume them here to collect all the samples
+        */
+       err = ring_buffer__consume(ringbuf);
+       CHECK(err < 0, "rb_consume", "failed: %d\n", err);
+
        /* 3 rounds, 2 samples each */
        cnt = atomic_xchg(&sample_cnt, 0);
        CHECK(cnt != 6, "cnt", "exp %d samples, got %d\n", 6, cnt);
index 78e4506..d37161e 100644 (file)
@@ -81,7 +81,7 @@ void test_ringbuf_multi(void)
 
        /* poll for samples, should get 2 ringbufs back */
        err = ring_buffer__poll(ringbuf, -1);
-       if (CHECK(err != 4, "poll_res", "expected 4 records, got %d\n", err))
+       if (CHECK(err != 2, "poll_res", "expected 2 records, got %d\n", err))
                goto cleanup;
 
        /* expect extra polling to return nothing */
index 43c9cda..b99bb8e 100755 (executable)
@@ -184,9 +184,7 @@ def bpftool_prog_list(expected=None, ns=""):
 def bpftool_map_list(expected=None, ns=""):
     _, maps = bpftool("map show", JSON=True, ns=ns, fail=True)
     # Remove the base maps
-    for m in base_maps:
-        if m in maps:
-            maps.remove(m)
+    maps = [m for m in maps if m not in base_maps and m.get('name') not in base_map_names]
     if expected is not None:
         if len(maps) != expected:
             fail(True, "%d BPF maps loaded, expected %d" %
@@ -716,13 +714,11 @@ def test_multi_prog(simdev, sim, obj, modename, modeid):
     fail(ret == 0, "Replaced one of programs without -force")
     check_extack(err, "XDP program already attached.", args)
 
-    if modename == "" or modename == "drv":
-        othermode = "" if modename == "drv" else "drv"
-        start_test("Test multi-attachment XDP - detach...")
-        ret, _, err = sim.unset_xdp(othermode, force=True,
-                                    fail=False, include_stderr=True)
-        fail(ret == 0, "Removed program with a bad mode")
-        check_extack(err, "program loaded with different flags.", args)
+    start_test("Test multi-attachment XDP - remove without mode...")
+    ret, _, err = sim.unset_xdp("", force=True,
+                                fail=False, include_stderr=True)
+    fail(ret == 0, "Removed program without a mode flag")
+    check_extack(err, "More than one program loaded, unset mode is ambiguous.", args)
 
     sim.unset_xdp("offload")
     xdp = sim.ip_link_show(xdp=True)["xdp"]
@@ -772,6 +768,9 @@ ret, progs = bpftool("prog", fail=False)
 skip(ret != 0, "bpftool not installed")
 base_progs = progs
 _, base_maps = bpftool("map")
+base_map_names = [
+    'pid_iter.rodata' # created on each bpftool invocation
+]
 
 # Check netdevsim
 ret, out = cmd("modprobe netdevsim", fail=False)
@@ -913,11 +912,18 @@ try:
 
     sim.tc_flush_filters()
 
+    start_test("Test TC offloads failure...")
+    sim.dfs["dev/bpf_bind_verifier_accept"] = 0
+    ret, _, err = sim.cls_bpf_add_filter(obj, verbose=True, skip_sw=True,
+                                         fail=False, include_stderr=True)
+    fail(ret == 0, "TC filter did not reject with TC offloads enabled")
+    check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
+    sim.dfs["dev/bpf_bind_verifier_accept"] = 1
+
     start_test("Test TC offloads work...")
     ret, _, err = sim.cls_bpf_add_filter(obj, verbose=True, skip_sw=True,
                                          fail=False, include_stderr=True)
     fail(ret != 0, "TC filter did not load with TC offloads enabled")
-    check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
 
     start_test("Test TC offload basics...")
     dfs = simdev.dfs_get_bound_progs(expected=1)
@@ -941,6 +947,7 @@ try:
     start_test("Test disabling TC offloads is rejected while filters installed...")
     ret, _ = sim.set_ethtool_tc_offloads(False, fail=False)
     fail(ret == 0, "Driver should refuse to disable TC offloads with filters installed...")
+    sim.set_ethtool_tc_offloads(True)
 
     start_test("Test qdisc removal frees things...")
     sim.tc_flush_filters()
@@ -999,18 +1006,8 @@ try:
                               fail=False, include_stderr=True)
     fail(ret == 0, "Replaced XDP program with a program in different mode")
     check_extack(err,
-                 "native and generic XDP can't be active at the same time.",
+                 "Native and generic XDP can't be active at the same time.",
                  args)
-    ret, _, err = sim.set_xdp(obj, "", force=True,
-                              fail=False, include_stderr=True)
-    fail(ret == 0, "Replaced XDP program with a program in different mode")
-    check_extack(err, "program loaded with different flags.", args)
-
-    start_test("Test XDP prog remove with bad flags...")
-    ret, _, err = sim.unset_xdp("", force=True,
-                                fail=False, include_stderr=True)
-    fail(ret == 0, "Removed program with a bad mode")
-    check_extack(err, "program loaded with different flags.", args)
 
     start_test("Test MTU restrictions...")
     ret, _ = sim.set_mtu(9000, fail=False)
@@ -1040,10 +1037,19 @@ try:
     offload = bpf_pinned("/sys/fs/bpf/offload")
     ret, _, err = sim.set_xdp(offload, "drv", fail=False, include_stderr=True)
     fail(ret == 0, "attached offloaded XDP program to drv")
-    check_extack(err, "using device-bound program without HW_MODE flag is not supported.", args)
+    check_extack(err, "Using device-bound program without HW_MODE flag is not supported.", args)
     rm("/sys/fs/bpf/offload")
     sim.wait_for_flush()
 
+    start_test("Test XDP load failure...")
+    sim.dfs["dev/bpf_bind_verifier_accept"] = 0
+    ret, _, err = bpftool_prog_load("sample_ret0.o", "/sys/fs/bpf/offload",
+                                 dev=sim['ifname'], fail=False, include_stderr=True)
+    fail(ret == 0, "verifier should fail on load")
+    check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
+    sim.dfs["dev/bpf_bind_verifier_accept"] = 1
+    sim.wait_for_flush()
+
     start_test("Test XDP offload...")
     _, _, err = sim.set_xdp(obj, "offload", verbose=True, include_stderr=True)
     ipl = sim.ip_link_show(xdp=True)
@@ -1051,7 +1057,6 @@ try:
     progs = bpftool_prog_list(expected=1)
     prog = progs[0]
     fail(link_xdp["id"] != prog["id"], "Loaded program has wrong ID")
-    check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
 
     start_test("Test XDP offload is device bound...")
     dfs = simdev.dfs_get_bound_progs(expected=1)
index 1c4b193..bed53b5 100644 (file)
@@ -68,7 +68,7 @@
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
        BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
-       BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
+       BPF_JMP32_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
        BPF_MOV32_IMM(BPF_REG_1, 0),
        BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
        BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
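The register was loaded with a 32-bit (BPF_W) read, so the check has to be a 32-bit one: a 64-bit BPF_JMP_IMM sign-extends the 0xffffffff immediate to -1, and a zero-extended 32-bit value always compares greater, while BPF_JMP32_IMM compares only the low 32 bits and actually constrains the sign of the loaded word. A small C illustration of that difference (plain C semantics, not verifier internals):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t reg = 0x80000000ull;          /* 32-bit load, zero-extended */
            int32_t  imm = (int32_t)0xffffffff;    /* the immediate: -1 as signed */

            /* 64-bit JSGT: (s64)reg > -1 holds for every zero-extended
             * 32-bit value, so the check learns nothing useful. */
            printf("64-bit jsgt: %d\n", (int64_t)reg > (int64_t)imm);

            /* 32-bit JSGT: only the low word is compared; 0x80000000 is
             * negative there, so the branch is not taken. */
            printf("32-bit jsgt: %d\n", (int32_t)(uint32_t)reg > imm);
            return 0;
    }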
index dac40de..57ed67b 100644 (file)
        .fixup_map_hash_8b = { 3 },
        .result = ACCEPT,
 },
+{
+       "bounds checks after 32-bit truncation. test 1",
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+       /* This used to reduce the max bound to 0x7fffffff */
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+       BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0x7fffffff, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_8b = { 3 },
+       .errstr_unpriv = "R0 leaks addr",
+       .result_unpriv = REJECT,
+       .result = ACCEPT,
+},
+{
+       "bounds checks after 32-bit truncation. test 2",
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+       BPF_JMP_IMM(BPF_JSLT, BPF_REG_1, 1, 1),
+       BPF_JMP32_IMM(BPF_JSLT, BPF_REG_1, 0, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_8b = { 3 },
+       .errstr_unpriv = "R0 leaks addr",
+       .result_unpriv = REJECT,
+       .result = ACCEPT,
+},
index fb5c55d..02b0b9e 100755 (executable)
@@ -256,6 +256,28 @@ setup_cmd_nsb()
        fi
 }
 
+setup_cmd_nsc()
+{
+       local cmd="$*"
+       local rc
+
+       run_cmd_nsc ${cmd}
+       rc=$?
+       if [ $rc -ne 0 ]; then
+               # show user the command if not done so already
+               if [ "$VERBOSE" = "0" ]; then
+                       echo "setup command: $cmd"
+               fi
+               echo "failed. stopping tests"
+               if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+                       echo
+                       echo "hit enter to continue"
+                       read a
+               fi
+               exit $rc
+       fi
+}
+
 # set sysctl values in NS-A
 set_sysctl()
 {
@@ -471,6 +493,36 @@ setup()
        sleep 1
 }
 
+setup_lla_only()
+{
+       # make sure we are starting with a clean slate
+       kill_procs
+       cleanup 2>/dev/null
+
+       log_debug "Configuring network namespaces"
+       set -e
+
+       create_ns ${NSA} "-" "-"
+       create_ns ${NSB} "-" "-"
+       create_ns ${NSC} "-" "-"
+       connect_ns ${NSA} ${NSA_DEV} "-" "-" \
+                  ${NSB} ${NSB_DEV} "-" "-"
+       connect_ns ${NSA} ${NSA_DEV2} "-" "-" \
+                  ${NSC} ${NSC_DEV}  "-" "-"
+
+       NSA_LINKIP6=$(get_linklocal ${NSA} ${NSA_DEV})
+       NSB_LINKIP6=$(get_linklocal ${NSB} ${NSB_DEV})
+       NSC_LINKIP6=$(get_linklocal ${NSC} ${NSC_DEV})
+
+       create_vrf ${NSA} ${VRF} ${VRF_TABLE} "-" "-"
+       ip -netns ${NSA} link set dev ${NSA_DEV} vrf ${VRF}
+       ip -netns ${NSA} link set dev ${NSA_DEV2} vrf ${VRF}
+
+       set +e
+
+       sleep 1
+}
+
 ################################################################################
 # IPv4
 
@@ -3787,10 +3839,53 @@ use_case_br()
        setup_cmd_nsb ip li del vlan100 2>/dev/null
 }
 
+# VRF only.
+# ns-A device is connected to both ns-B and ns-C on a single VRF but only has
+# LLA on the interfaces
+use_case_ping_lla_multi()
+{
+       setup_lla_only
+       # only want reply from ns-A
+       setup_cmd_nsb sysctl -qw net.ipv6.icmp.echo_ignore_multicast=1
+       setup_cmd_nsc sysctl -qw net.ipv6.icmp.echo_ignore_multicast=1
+
+       log_start
+       run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV}
+       log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Pre cycle, ping out ns-B"
+
+       run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV}
+       log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Pre cycle, ping out ns-C"
+
+       # cycle/flap the first ns-A interface
+       setup_cmd ip link set ${NSA_DEV} down
+       setup_cmd ip link set ${NSA_DEV} up
+       sleep 1
+
+       log_start
+       run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV}
+       log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV}, ping out ns-B"
+       run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV}
+       log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV}, ping out ns-C"
+
+       # cycle/flap the second ns-A interface
+       setup_cmd ip link set ${NSA_DEV2} down
+       setup_cmd ip link set ${NSA_DEV2} up
+       sleep 1
+
+       log_start
+       run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV}
+       log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV2}, ping out ns-B"
+       run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV}
+       log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV2}, ping out ns-C"
+}
+
 use_cases()
 {
        log_section "Use cases"
+       log_subsection "Device enslaved to bridge"
        use_case_br
+       log_subsection "Ping LLA with multiple interfaces"
+       use_case_ping_lla_multi
 }
 
 ################################################################################
index db3d4a8..76a2405 100644 (file)
@@ -113,6 +113,9 @@ static void do_poll(int fd, int timeout_ms)
                                interrupted = true;
                                break;
                        }
+
+                       /* no events and more time to wait, do poll again */
+                       continue;
                }
                if (pfd.revents != POLLIN)
                        error(1, errno, "poll: 0x%x expected 0x%x\n",