Merge tag 'iommu-fix-v6.6-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/joro...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 27 Oct 2023 15:43:05 +0000 (05:43 -1000)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 27 Oct 2023 15:43:05 +0000 (05:43 -1000)
Pull iommu fix from Joerg Roedel:

 - Fix a boot regression on Sapphire Rapids with the Intel VT-d driver

* tag 'iommu-fix-v6.6-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu: Avoid unnecessary cache invalidations

109 files changed:
.mailmap
MAINTAINERS
arch/arm/boot/dts/rockchip/rk3128.dtsi
arch/arm/boot/dts/ti/omap/omap4-l4-abe.dtsi
arch/arm/boot/dts/ti/omap/omap4-l4.dtsi
arch/arm/boot/dts/ti/omap/omap5-l4-abe.dtsi
arch/arm/mach-omap1/board-ams-delta.c
arch/arm/mach-omap1/timer32k.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm64/boot/dts/qcom/apq8096-db820c.dts
arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
arch/arm64/boot/dts/qcom/sa8775p-pmics.dtsi
arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
arch/arm64/boot/dts/rockchip/rk3399.dtsi
arch/powerpc/kernel/setup-common.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/pgtable.c
arch/riscv/Kconfig
arch/riscv/Kconfig.errata
arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
arch/riscv/boot/dts/thead/th1520.dtsi
arch/riscv/mm/fault.c
arch/riscv/mm/hugetlbpage.c
drivers/accel/ivpu/ivpu_hw_37xx.c
drivers/acpi/nfit/core.c
drivers/cache/Kconfig
drivers/clk/ti/clk-44xx.c
drivers/clk/ti/clk-54xx.c
drivers/connector/cn_proc.c
drivers/crypto/virtio/virtio_crypto_common.h
drivers/crypto/virtio/virtio_crypto_core.c
drivers/firmware/imx/imx-dsp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/display/drm_dp_mst_topology.c
drivers/gpu/drm/i915/gt/intel_gt_mcr.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/logicvc/Kconfig
drivers/isdn/hardware/mISDN/hfcsusb.c
drivers/net/ethernet/adi/adin1110.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igc/igc_ethtool.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/sfc/tc.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
drivers/net/gtp.c
drivers/net/ieee802154/adf7242.c
drivers/net/usb/r8152.c
drivers/net/usb/smsc95xx.c
drivers/soc/renesas/Kconfig
drivers/vdpa/mlx5/net/debug.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/mlx5/net/mlx5_vnet.h
drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
drivers/vhost/vhost.c
drivers/virtio/virtio_balloon.c
drivers/virtio/virtio_mmio.c
drivers/virtio/virtio_pci_modern_dev.c
fs/btrfs/backref.c
fs/btrfs/backref.h
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/relocation.c
fs/nfsd/vfs.c
include/linux/hugetlb.h
include/linux/ieee80211.h
include/linux/kasan.h
include/net/netfilter/nf_flow_table.h
include/uapi/linux/gtp.h
lib/maple_tree.c
lib/test_maple_tree.c
mm/damon/sysfs.c
mm/hugetlb.c
mm/kasan/report.c
mm/memory.c
mm/mempolicy.c
mm/migrate.c
mm/mmap.c
mm/page_alloc.c
mm/zswap.c
net/core/neighbour.c
net/handshake/netlink.c
net/ipv4/esp4.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv6/esp6.c
net/mac80211/rx.c
net/netfilter/nf_flow_table_core.c
net/sched/act_ct.c
net/vmw_vsock/virtio_transport.c
net/wireless/mlme.c
net/wireless/scan.c
tools/include/linux/rwsem.h [new file with mode: 0644]
tools/include/nolibc/arch-i386.h
tools/include/nolibc/crt.h
tools/testing/selftests/mm/mremap_dontunmap.c
tools/virtio/linux/dma-mapping.h

index c80903e..2643b72 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -87,6 +87,7 @@ Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@unisoc.com>
 Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang7@gmail.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
+Bartosz Golaszewski <brgl@bgdev.pl> <bgolaszewski@baylibre.com>
 Ben Dooks <ben-linux@fluff.org> <ben.dooks@simtec.co.uk>
 Ben Dooks <ben-linux@fluff.org> <ben.dooks@sifive.com>
 Ben Gardner <bgardner@wabtec.com>
@@ -450,9 +451,10 @@ Oleksandr Natalenko <oleksandr@natalenko.name> <oleksandr@redhat.com>
 Oleksij Rempel <linux@rempel-privat.de> <bug-track@fisher-privat.net>
 Oleksij Rempel <linux@rempel-privat.de> <external.Oleksij.Rempel@de.bosch.com>
 Oleksij Rempel <linux@rempel-privat.de> <fixed-term.Oleksij.Rempel@de.bosch.com>
-Oleksij Rempel <linux@rempel-privat.de> <o.rempel@pengutronix.de>
-Oleksij Rempel <linux@rempel-privat.de> <ore@pengutronix.de>
+Oleksij Rempel <o.rempel@pengutronix.de>
+Oleksij Rempel <o.rempel@pengutronix.de> <ore@pengutronix.de>
 Oliver Upton <oliver.upton@linux.dev> <oupton@google.com>
+Ondřej Jirman <megi@xff.cz> <megous@megous.com>
 Oza Pawandeep <quic_poza@quicinc.com> <poza@codeaurora.org>
 Pali Rohár <pali@kernel.org> <pali.rohar@gmail.com>
 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
index 2894f07..8bc58b2 100644 (file)
@@ -6766,7 +6766,7 @@ F:        drivers/gpu/drm/panel/panel-sitronix-st7701.c
 DRM DRIVER FOR SITRONIX ST7703 PANELS
 M:     Guido Günther <agx@sigxcpu.org>
 R:     Purism Kernel Team <kernel@puri.sm>
-R:     Ondrej Jirman <megous@megous.com>
+R:     Ondrej Jirman <megi@xff.cz>
 S:     Maintained
 F:     Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml
 F:     drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -13846,9 +13846,10 @@ F:     Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
 F:     drivers/staging/media/meson/vdec/
 
 METHODE UDPU SUPPORT
-M:     Vladimir Vid <vladimir.vid@sartura.hr>
+M:     Robert Marko <robert.marko@sartura.hr>
 S:     Maintained
-F:     arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
+F:     arch/arm64/boot/dts/marvell/armada-3720-eDPU.dts
+F:     arch/arm64/boot/dts/marvell/armada-3720-uDPU.*
 
 MHI BUS
 M:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
@@ -15131,7 +15132,7 @@ NOLIBC HEADER FILE
 M:     Willy Tarreau <w@1wt.eu>
 M:     Thomas Weißschuh <linux@weissschuh.net>
 S:     Maintained
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/wtarreau/nolibc.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/nolibc/linux-nolibc.git
 F:     tools/include/nolibc/
 F:     tools/testing/selftests/nolibc/
 
index b63bd4a..88a4b0d 100644 (file)
@@ -64,7 +64,8 @@
                compatible = "arm,armv7-timer";
                interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
                             <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
-                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
                arm,cpu-registers-not-fw-configured;
                clock-frequency = <24000000>;
        };
                compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
                reg = <0x20044000 0x20>;
                interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&cru PCLK_TIMER>, <&xin24m>;
+               clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER0>;
                clock-names = "pclk", "timer";
        };
 
                compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
                reg = <0x20044020 0x20>;
                interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&cru PCLK_TIMER>, <&xin24m>;
+               clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER1>;
                clock-names = "pclk", "timer";
        };
 
                compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
                reg = <0x20044040 0x20>;
                interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&cru PCLK_TIMER>, <&xin24m>;
+               clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER2>;
                clock-names = "pclk", "timer";
        };
 
                compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
                reg = <0x20044060 0x20>;
                interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&cru PCLK_TIMER>, <&xin24m>;
+               clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER3>;
                clock-names = "pclk", "timer";
        };
 
                compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
                reg = <0x20044080 0x20>;
                interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&cru PCLK_TIMER>, <&xin24m>;
+               clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER4>;
                clock-names = "pclk", "timer";
        };
 
                compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
                reg = <0x200440a0 0x20>;
                interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&cru PCLK_TIMER>, <&xin24m>;
+               clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER5>;
                clock-names = "pclk", "timer";
        };
 
 
        i2c0: i2c@20072000 {
                compatible = "rockchip,rk3128-i2c", "rockchip,rk3288-i2c";
-               reg = <20072000 0x1000>;
+               reg = <0x20072000 0x1000>;
                interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
                clock-names = "i2c";
                clocks = <&cru PCLK_I2C0>;
                interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
                arm,pl330-broken-no-flushp;
+               arm,pl330-periph-burst;
                clocks = <&cru ACLK_DMAC>;
                clock-names = "apb_pclk";
                #dma-cells = <1>;
index 7ae8b62..59f546a 100644 (file)
                                reg = <0x0 0xff>, /* MPU private access */
                                      <0x49022000 0xff>; /* L3 Interconnect */
                                reg-names = "mpu", "dma";
+                               clocks = <&abe_clkctrl OMAP4_MCBSP1_CLKCTRL 24>;
+                               clock-names = "fck";
                                interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
                                interrupt-names = "common";
                                ti,buffer-size = <128>;
                                reg = <0x0 0xff>, /* MPU private access */
                                      <0x49024000 0xff>; /* L3 Interconnect */
                                reg-names = "mpu", "dma";
+                               clocks = <&abe_clkctrl OMAP4_MCBSP2_CLKCTRL 24>;
+                               clock-names = "fck";
                                interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
                                interrupt-names = "common";
                                ti,buffer-size = <128>;
                                reg = <0x0 0xff>, /* MPU private access */
                                      <0x49026000 0xff>; /* L3 Interconnect */
                                reg-names = "mpu", "dma";
+                               clocks = <&abe_clkctrl OMAP4_MCBSP3_CLKCTRL 24>;
+                               clock-names = "fck";
                                interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
                                interrupt-names = "common";
                                ti,buffer-size = <128>;
index 46b8f9e..3fcef30 100644 (file)
                                compatible = "ti,omap4-mcbsp";
                                reg = <0x0 0xff>; /* L4 Interconnect */
                                reg-names = "mpu";
+                               clocks = <&l4_per_clkctrl OMAP4_MCBSP4_CLKCTRL 24>;
+                               clock-names = "fck";
                                interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
                                interrupt-names = "common";
                                ti,buffer-size = <128>;
index a03bca5..97b0c3b 100644 (file)
                                reg = <0x0 0xff>, /* MPU private access */
                                      <0x49022000 0xff>; /* L3 Interconnect */
                                reg-names = "mpu", "dma";
+                               clocks = <&abe_clkctrl OMAP5_MCBSP1_CLKCTRL 24>;
+                               clock-names = "fck";
                                interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
                                interrupt-names = "common";
                                ti,buffer-size = <128>;
                                reg = <0x0 0xff>, /* MPU private access */
                                      <0x49024000 0xff>; /* L3 Interconnect */
                                reg-names = "mpu", "dma";
+                               clocks = <&abe_clkctrl OMAP5_MCBSP2_CLKCTRL 24>;
+                               clock-names = "fck";
                                interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
                                interrupt-names = "common";
                                ti,buffer-size = <128>;
                                reg = <0x0 0xff>, /* MPU private access */
                                      <0x49026000 0xff>; /* L3 Interconnect */
                                reg-names = "mpu", "dma";
+                               clocks = <&abe_clkctrl OMAP5_MCBSP3_CLKCTRL 24>;
+                               clock-names = "fck";
                                interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
                                interrupt-names = "common";
                                ti,buffer-size = <128>;
index 9808cd2..67de96c 100644 (file)
@@ -550,6 +550,7 @@ static struct platform_device *ams_delta_devices[] __initdata = {
        &ams_delta_nand_device,
        &ams_delta_lcd_device,
        &cx20442_codec_device,
+       &modem_nreset_device,
 };
 
 static struct gpiod_lookup_table *ams_delta_gpio_tables[] __initdata = {
@@ -782,26 +783,28 @@ static struct plat_serial8250_port ams_delta_modem_ports[] = {
        { },
 };
 
+static int ams_delta_modem_pm_activate(struct device *dev)
+{
+       modem_priv.regulator = regulator_get(dev, "RESET#");
+       if (IS_ERR(modem_priv.regulator))
+               return -EPROBE_DEFER;
+
+       return 0;
+}
+
+static struct dev_pm_domain ams_delta_modem_pm_domain = {
+       .activate       = ams_delta_modem_pm_activate,
+};
+
 static struct platform_device ams_delta_modem_device = {
        .name   = "serial8250",
        .id     = PLAT8250_DEV_PLATFORM1,
        .dev            = {
                .platform_data = ams_delta_modem_ports,
+               .pm_domain = &ams_delta_modem_pm_domain,
        },
 };
 
-static int __init modem_nreset_init(void)
-{
-       int err;
-
-       err = platform_device_register(&modem_nreset_device);
-       if (err)
-               pr_err("Couldn't register the modem regulator device\n");
-
-       return err;
-}
-
-
 /*
  * This function expects MODEM IRQ number already assigned to the port.
  * The MODEM device requires its RESET# pin kept high during probe.
@@ -833,37 +836,6 @@ static int __init ams_delta_modem_init(void)
 }
 arch_initcall_sync(ams_delta_modem_init);
 
-static int __init late_init(void)
-{
-       int err;
-
-       err = modem_nreset_init();
-       if (err)
-               return err;
-
-       /*
-        * Once the modem device is registered, the modem_nreset
-        * regulator can be requested on behalf of that device.
-        */
-       modem_priv.regulator = regulator_get(&ams_delta_modem_device.dev,
-                       "RESET#");
-       if (IS_ERR(modem_priv.regulator)) {
-               err = PTR_ERR(modem_priv.regulator);
-               goto unregister;
-       }
-       return 0;
-
-unregister:
-       platform_device_unregister(&ams_delta_modem_device);
-       return err;
-}
-
-static void __init ams_delta_init_late(void)
-{
-       omap1_init_late();
-       late_init();
-}
-
 static void __init ams_delta_map_io(void)
 {
        omap1_map_io();
@@ -877,7 +849,7 @@ MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)")
        .init_early     = omap1_init_early,
        .init_irq       = omap1_init_irq,
        .init_machine   = ams_delta_init,
-       .init_late      = ams_delta_init_late,
+       .init_late      = omap1_init_late,
        .init_time      = omap1_timer_init,
        .restart        = omap1_restart,
 MACHINE_END
index 410d17d..f618a6d 100644 (file)
@@ -176,17 +176,18 @@ static u64 notrace omap_32k_read_sched_clock(void)
        return sync32k_cnt_reg ? readl_relaxed(sync32k_cnt_reg) : 0;
 }
 
+static struct timespec64 persistent_ts;
+static cycles_t cycles;
+static unsigned int persistent_mult, persistent_shift;
+
 /**
  * omap_read_persistent_clock64 -  Return time from a persistent clock.
+ * @ts: &struct timespec64 for the returned time
  *
  * Reads the time from a source which isn't disabled during PM, the
  * 32k sync timer.  Convert the cycles elapsed since last read into
  * nsecs and adds to a monotonically increasing timespec64.
  */
-static struct timespec64 persistent_ts;
-static cycles_t cycles;
-static unsigned int persistent_mult, persistent_shift;
-
 static void omap_read_persistent_clock64(struct timespec64 *ts)
 {
        unsigned long long nsecs;
@@ -206,10 +207,9 @@ static void omap_read_persistent_clock64(struct timespec64 *ts)
 /**
  * omap_init_clocksource_32k - setup and register counter 32k as a
  * kernel clocksource
- * @pbase: base addr of counter_32k module
- * @size: size of counter_32k to map
+ * @vbase: base addr of counter_32k module
  *
- * Returns 0 upon success or negative error code upon failure.
+ * Returns: %0 upon success or negative error code upon failure.
  *
  */
 static int __init omap_init_clocksource_32k(void __iomem *vbase)
index 1e17b5f..ba71928 100644 (file)
@@ -2209,7 +2209,7 @@ int omap_hwmod_parse_module_range(struct omap_hwmod *oh,
                return err;
 
        pr_debug("omap_hwmod: %s %pOFn at %pR\n",
-                oh->name, np, &res);
+                oh->name, np, res);
 
        if (oh && oh->mpu_rt_idx) {
                omap_hwmod_fix_mpu_rt_idx(oh, np, res);
index 385b178..3067a40 100644 (file)
                stdout-path = "serial0:115200n8";
        };
 
-       clocks {
-               divclk4: divclk4 {
-                       compatible = "fixed-clock";
-                       #clock-cells = <0>;
-                       clock-frequency = <32768>;
-                       clock-output-names = "divclk4";
+       div1_mclk: divclk1 {
+               compatible = "gpio-gate-clock";
+               pinctrl-0 = <&audio_mclk>;
+               pinctrl-names = "default";
+               clocks = <&rpmcc RPM_SMD_DIV_CLK1>;
+               #clock-cells = <0>;
+               enable-gpios = <&pm8994_gpios 15 0>;
+       };
 
-                       pinctrl-names = "default";
-                       pinctrl-0 = <&divclk4_pin_a>;
-               };
+       divclk4: divclk4 {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <32768>;
+               clock-output-names = "divclk4";
 
-               div1_mclk: divclk1 {
-                       compatible = "gpio-gate-clock";
-                       pinctrl-0 = <&audio_mclk>;
-                       pinctrl-names = "default";
-                       clocks = <&rpmcc RPM_SMD_DIV_CLK1>;
-                       #clock-cells = <0>;
-                       enable-gpios = <&pm8994_gpios 15 0>;
-               };
+               pinctrl-names = "default";
+               pinctrl-0 = <&divclk4_pin_a>;
        };
 
        gpio-keys {
index bcd2397..06f8ff6 100644 (file)
 #include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
 
 / {
-       clocks {
-               divclk1_cdc: divclk1 {
-                       compatible = "gpio-gate-clock";
-                       clocks = <&rpmcc RPM_SMD_DIV_CLK1>;
-                       #clock-cells = <0>;
-                       enable-gpios = <&pm8994_gpios 15 GPIO_ACTIVE_HIGH>;
+       divclk1_cdc: divclk1 {
+               compatible = "gpio-gate-clock";
+               clocks = <&rpmcc RPM_SMD_DIV_CLK1>;
+               #clock-cells = <0>;
+               enable-gpios = <&pm8994_gpios 15 GPIO_ACTIVE_HIGH>;
 
-                       pinctrl-names = "default";
-                       pinctrl-0 = <&divclk1_default>;
-               };
+               pinctrl-names = "default";
+               pinctrl-0 = <&divclk1_default>;
+       };
 
-               divclk4: divclk4 {
-                       compatible = "fixed-clock";
-                       #clock-cells = <0>;
-                       clock-frequency = <32768>;
-                       clock-output-names = "divclk4";
+       divclk4: divclk4 {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <32768>;
+               clock-output-names = "divclk4";
 
-                       pinctrl-names = "default";
-                       pinctrl-0 = <&divclk4_pin_a>;
-               };
+               pinctrl-names = "default";
+               pinctrl-0 = <&divclk4_pin_a>;
        };
 
        gpio-keys {
index d1066ed..f8e9d90 100644 (file)
        qcom,pmic-id = <0x20009 0x2000a 0x00 0x00>;
        qcom,board-id = <31 0>;
 
-       clocks {
-               divclk2_haptics: divclk2 {
-                       compatible = "fixed-clock";
-                       #clock-cells = <0>;
-                       clock-frequency = <32768>;
-                       clock-output-names = "divclk2";
-
-                       pinctrl-names = "default";
-                       pinctrl-0 = <&divclk2_pin_a>;
-               };
+       divclk2_haptics: divclk2 {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <32768>;
+               clock-output-names = "divclk2";
+
+               pinctrl-names = "default";
+               pinctrl-0 = <&divclk2_pin_a>;
        };
 };
 
index 3c3b628..eaa43f0 100644 (file)
                        compatible = "qcom,pmm8654au-gpio", "qcom,spmi-gpio";
                        reg = <0x8800>;
                        gpio-controller;
-                       gpio-ranges = <&pmm8654au_2_gpios 0 0 12>;
+                       gpio-ranges = <&pmm8654au_1_gpios 0 0 12>;
                        #gpio-cells = <2>;
                        interrupt-controller;
                        #interrupt-cells = <2>;
index 08a3ad3..de0a1f2 100644 (file)
                simple-audio-card,format = "i2s";
                simple-audio-card,name = "Haikou,I2S-codec";
                simple-audio-card,mclk-fs = <512>;
+               simple-audio-card,frame-master = <&sgtl5000_codec>;
+               simple-audio-card,bitclock-master = <&sgtl5000_codec>;
 
-               simple-audio-card,codec {
-                       clocks = <&sgtl5000_clk>;
+               sgtl5000_codec: simple-audio-card,codec {
                        sound-dai = <&sgtl5000>;
+                       // Prevent the dai subsystem from overwriting the clock
+                       // frequency. We are using a fixed-frequency oscillator.
+                       system-clock-fixed;
                };
 
                simple-audio-card,cpu {
-                       bitclock-master;
-                       frame-master;
                        sound-dai = <&i2s0_8ch>;
                };
        };
index 7dccbe8..f2279aa 100644 (file)
 
 &i2s0 {
        pinctrl-0 = <&i2s0_2ch_bus>;
+       pinctrl-1 = <&i2s0_2ch_bus_bclk_off>;
        rockchip,capture-channels = <2>;
        rockchip,playback-channels = <2>;
        status = "okay";
index 9da0b6d..5bc2d4f 100644 (file)
                                        <4 RK_PA0 1 &pcfg_pull_none>;
                        };
 
+                       i2s0_2ch_bus_bclk_off: i2s0-2ch-bus-bclk-off {
+                               rockchip,pins =
+                                       <3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>,
+                                       <3 RK_PD1 1 &pcfg_pull_none>,
+                                       <3 RK_PD2 1 &pcfg_pull_none>,
+                                       <3 RK_PD3 1 &pcfg_pull_none>,
+                                       <3 RK_PD7 1 &pcfg_pull_none>,
+                                       <4 RK_PA0 1 &pcfg_pull_none>;
+                       };
+
                        i2s0_8ch_bus: i2s0-8ch-bus {
                                rockchip,pins =
                                        <3 RK_PD0 1 &pcfg_pull_none>,
index 2f1026f..20f72cd 100644 (file)
@@ -948,6 +948,8 @@ void __init setup_arch(char **cmdline_p)
 
        /* Parse memory topology */
        mem_topology_setup();
+       /* Set max_mapnr before paging_init() */
+       set_max_mapnr(max_pfn);
 
        /*
         * Release secondary cpus out of their spinloops at 0x60 now that
index 8b121df..07e8f4f 100644 (file)
@@ -288,7 +288,6 @@ void __init mem_init(void)
 #endif
 
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
-       set_max_mapnr(max_pfn);
 
        kasan_late_init();
 
index 3ba9fe4..4d69bfb 100644 (file)
@@ -104,6 +104,8 @@ static pte_t set_pte_filter_hash(pte_t pte) { return pte; }
 /* Embedded type MMU with HW exec support. This is a bit more complicated
  * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
  * instead we "filter out" the exec permission for non clean pages.
+ *
+ * This is also called once for the folio. So only work with folio->flags here.
  */
 static inline pte_t set_pte_filter(pte_t pte)
 {
@@ -190,29 +192,39 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
 void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
                pte_t pte, unsigned int nr)
 {
-       /*
-        * Make sure hardware valid bit is not set. We don't do
-        * tlb flush for this update.
-        */
-       VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
 
        /* Note: mm->context.id might not yet have been assigned as
         * this context might not have been activated yet when this
-        * is called.
         * is called. Filter the pte value and use the filtered value
         * to set up all the ptes in the range.
         */
        pte = set_pte_filter(pte);
 
-       /* Perform the setting of the PTE */
-       arch_enter_lazy_mmu_mode();
+       /*
+        * We don't need to call arch_enter/leave_lazy_mmu_mode()
+        * because we expect set_ptes to only be used on not present
+        * and not hw_valid ptes. Hence there is no translation cache flush
+        * involved that needs to be batched.
+        */
        for (;;) {
+
+               /*
+                * Make sure hardware valid bit is not set. We don't do
+                * tlb flush for this update.
+                */
+               VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
+
+               /* Perform the setting of the PTE */
                __set_pte_at(mm, addr, ptep, pte, 0);
                if (--nr == 0)
                        break;
                ptep++;
-               pte = __pte(pte_val(pte) + (1UL << PTE_RPN_SHIFT));
                addr += PAGE_SIZE;
+               /*
+                * increment the pfn.
+                */
+               pte = pfn_pte(pte_pfn(pte) + 1, pte_pgprot(pte));
        }
-       arch_leave_lazy_mmu_mode();
 }
 
 void unmap_kernel_page(unsigned long va)
index d607ab0..9c48fec 100644 (file)
@@ -273,11 +273,9 @@ config RISCV_DMA_NONCOHERENT
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
        select DMA_BOUNCE_UNALIGNED_KMALLOC if SWIOTLB
-       select DMA_DIRECT_REMAP if MMU
 
 config RISCV_NONSTANDARD_CACHE_OPS
        bool
-       depends on RISCV_DMA_NONCOHERENT
        help
          This enables function pointer support for non-standard noncoherent
          systems to handle cache management.
@@ -550,6 +548,7 @@ config RISCV_ISA_ZICBOM
        depends on RISCV_ALTERNATIVE
        default y
        select RISCV_DMA_NONCOHERENT
+       select DMA_DIRECT_REMAP
        help
           Adds support to dynamically detect the presence of the ZICBOM
           extension (Cache Block Management Operations) and enable its
index 566bcef..e2c731c 100644 (file)
@@ -77,6 +77,7 @@ config ERRATA_THEAD_PBMT
 config ERRATA_THEAD_CMO
        bool "Apply T-Head cache management errata"
        depends on ERRATA_THEAD && MMU
+       select DMA_DIRECT_REMAP
        select RISCV_DMA_NONCOHERENT
        default y
        help
index 12ebe97..2c02358 100644 (file)
                };
 
                ss-pins {
-                       pinmux = <GPIOMUX(48, GPOUT_SYS_SPI0_FSS,
+                       pinmux = <GPIOMUX(49, GPOUT_SYS_SPI0_FSS,
                                              GPOEN_ENABLE,
                                              GPI_SYS_SPI0_FSS)>;
                        bias-disable;
index ce70818..ff36470 100644 (file)
                interrupt-parent = <&plic>;
                #address-cells = <2>;
                #size-cells = <2>;
+               dma-noncoherent;
                ranges;
 
                plic: interrupt-controller@ffd8000000 {
index 6115d75..90d4ba3 100644 (file)
@@ -72,7 +72,7 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
                }
                pagefault_out_of_memory();
                return;
-       } else if (fault & VM_FAULT_SIGBUS) {
+       } else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
                /* Kernel mode? Handle exceptions or die */
                if (!user_mode(regs)) {
                        no_context(regs, addr);
index e4a2ace..b52f021 100644 (file)
@@ -183,15 +183,22 @@ void set_huge_pte_at(struct mm_struct *mm,
                     pte_t pte,
                     unsigned long sz)
 {
+       unsigned long hugepage_shift;
        int i, pte_num;
 
-       if (!pte_napot(pte)) {
-               set_pte_at(mm, addr, ptep, pte);
-               return;
-       }
+       if (sz >= PGDIR_SIZE)
+               hugepage_shift = PGDIR_SHIFT;
+       else if (sz >= P4D_SIZE)
+               hugepage_shift = P4D_SHIFT;
+       else if (sz >= PUD_SIZE)
+               hugepage_shift = PUD_SHIFT;
+       else if (sz >= PMD_SIZE)
+               hugepage_shift = PMD_SHIFT;
+       else
+               hugepage_shift = PAGE_SHIFT;
 
-       pte_num = napot_pte_num(napot_cont_order(pte));
-       for (i = 0; i < pte_num; i++, ptep++, addr += PAGE_SIZE)
+       pte_num = sz >> hugepage_shift;
+       for (i = 0; i < pte_num; i++, ptep++, addr += (1 << hugepage_shift))
                set_pte_at(mm, addr, ptep, pte);
 }
 
index 9760194..18be8b9 100644 (file)
@@ -940,9 +940,6 @@ static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
        if (status == 0)
                return 0;
 
-       /* Disable global interrupt before handling local buttress interrupts */
-       REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
-
        if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
                ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
                         REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL));
@@ -974,9 +971,6 @@ static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
        else
                REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);
 
-       /* Re-enable global interrupt */
-       REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
-
        if (schedule_recovery)
                ivpu_pm_schedule_recovery(vdev);
 
@@ -988,9 +982,14 @@ static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr)
        struct ivpu_device *vdev = ptr;
        u32 ret_irqv, ret_irqb;
 
+       REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+
        ret_irqv = ivpu_hw_37xx_irqv_handler(vdev, irq);
        ret_irqb = ivpu_hw_37xx_irqb_handler(vdev, irq);
 
+       /* Re-enable global interrupts to re-trigger MSI for pending interrupts */
+       REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+
        return IRQ_RETVAL(ret_irqb | ret_irqv);
 }
 
index f96bf32..7d88db4 100644 (file)
@@ -3339,6 +3339,16 @@ static int acpi_nfit_add(struct acpi_device *adev)
        acpi_size sz;
        int rc = 0;
 
+       rc = acpi_dev_install_notify_handler(adev, ACPI_DEVICE_NOTIFY,
+                                            acpi_nfit_notify);
+       if (rc)
+               return rc;
+
+       rc = devm_add_action_or_reset(dev, acpi_nfit_remove_notify_handler,
+                                       adev);
+       if (rc)
+               return rc;
+
        status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
        if (ACPI_FAILURE(status)) {
                /* The NVDIMM root device allows OS to trigger enumeration of
@@ -3386,17 +3396,7 @@ static int acpi_nfit_add(struct acpi_device *adev)
        if (rc)
                return rc;
 
-       rc = devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
-       if (rc)
-               return rc;
-
-       rc = acpi_dev_install_notify_handler(adev, ACPI_DEVICE_NOTIFY,
-                                            acpi_nfit_notify);
-       if (rc)
-               return rc;
-
-       return devm_add_action_or_reset(dev, acpi_nfit_remove_notify_handler,
-                                       adev);
+       return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
 }
 
 static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
index a57677f..d6e5e3a 100644 (file)
@@ -3,7 +3,7 @@ menu "Cache Drivers"
 
 config AX45MP_L2_CACHE
        bool "Andes Technology AX45MP L2 Cache controller"
-       depends on RISCV_DMA_NONCOHERENT
+       depends on RISCV
        select RISCV_NONSTANDARD_CACHE_OPS
        help
          Support for the L2 cache controller on Andes Technology AX45MP platforms.
index 868bc7a..9b2824e 100644 (file)
@@ -749,9 +749,14 @@ static struct ti_dt_clk omap44xx_clks[] = {
        DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"),
        DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"),
        DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"),
+       DT_CLK("40122000.mcbsp", "prcm_fck", "abe-clkctrl:0028:26"),
+       DT_CLK("40124000.mcbsp", "prcm_fck", "abe-clkctrl:0030:26"),
+       DT_CLK("40126000.mcbsp", "prcm_fck", "abe-clkctrl:0038:26"),
        DT_CLK(NULL, "mcbsp4_sync_mux_ck", "l4-per-clkctrl:00c0:26"),
+       DT_CLK("48096000.mcbsp", "prcm_fck", "l4-per-clkctrl:00c0:26"),
        DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "l3-init-clkctrl:00c0:8"),
        DT_CLK(NULL, "otg_60m_gfclk", "l3-init-clkctrl:0040:24"),
+       DT_CLK(NULL, "pad_fck", "pad_clks_ck"),
        DT_CLK(NULL, "per_mcbsp4_gfclk", "l4-per-clkctrl:00c0:24"),
        DT_CLK(NULL, "pmd_stm_clock_mux_ck", "emu-sys-clkctrl:0000:20"),
        DT_CLK(NULL, "pmd_trace_clk_mux_ck", "emu-sys-clkctrl:0000:22"),
index b4aff76..74dfd58 100644 (file)
@@ -565,15 +565,19 @@ static struct ti_dt_clk omap54xx_clks[] = {
        DT_CLK(NULL, "gpio8_dbclk", "l4per-clkctrl:00f8:8"),
        DT_CLK(NULL, "mcbsp1_gfclk", "abe-clkctrl:0028:24"),
        DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"),
+       DT_CLK("40122000.mcbsp", "prcm_fck", "abe-clkctrl:0028:26"),
        DT_CLK(NULL, "mcbsp2_gfclk", "abe-clkctrl:0030:24"),
        DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"),
+       DT_CLK("40124000.mcbsp", "prcm_fck", "abe-clkctrl:0030:26"),
        DT_CLK(NULL, "mcbsp3_gfclk", "abe-clkctrl:0038:24"),
        DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"),
+       DT_CLK("40126000.mcbsp", "prcm_fck", "abe-clkctrl:0038:26"),
        DT_CLK(NULL, "mmc1_32khz_clk", "l3init-clkctrl:0008:8"),
        DT_CLK(NULL, "mmc1_fclk", "l3init-clkctrl:0008:25"),
        DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"),
        DT_CLK(NULL, "mmc2_fclk", "l3init-clkctrl:0010:25"),
        DT_CLK(NULL, "mmc2_fclk_mux", "l3init-clkctrl:0010:24"),
+       DT_CLK(NULL, "pad_fck", "pad_clks_ck"),
        DT_CLK(NULL, "sata_ref_clk", "l3init-clkctrl:0068:8"),
        DT_CLK(NULL, "timer10_gfclk_mux", "l4per-clkctrl:0008:24"),
        DT_CLK(NULL, "timer11_gfclk_mux", "l4per-clkctrl:0010:24"),
index 05d562e..44b19e6 100644 (file)
@@ -54,7 +54,7 @@ static int cn_filter(struct sock *dsk, struct sk_buff *skb, void *data)
        enum proc_cn_mcast_op mc_op;
        uintptr_t val;
 
-       if (!dsk || !data)
+       if (!dsk || !dsk->sk_user_data || !data)
                return 0;
 
        ptr = (__u32 *)data;
index 59a4c02..154590e 100644 (file)
@@ -35,6 +35,9 @@ struct virtio_crypto {
        struct virtqueue *ctrl_vq;
        struct data_queue *data_vq;
 
+       /* Work struct for config space updates */
+       struct work_struct config_work;
+
        /* To protect the vq operations for the controlq */
        spinlock_t ctrl_lock;
 
index 94849fa..43a0838 100644 (file)
@@ -335,6 +335,14 @@ static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
        virtcrypto_free_queues(vcrypto);
 }
 
+static void vcrypto_config_changed_work(struct work_struct *work)
+{
+       struct virtio_crypto *vcrypto =
+               container_of(work, struct virtio_crypto, config_work);
+
+       virtcrypto_update_status(vcrypto);
+}
+
 static int virtcrypto_probe(struct virtio_device *vdev)
 {
        int err = -EFAULT;
@@ -454,6 +462,8 @@ static int virtcrypto_probe(struct virtio_device *vdev)
        if (err)
                goto free_engines;
 
+       INIT_WORK(&vcrypto->config_work, vcrypto_config_changed_work);
+
        return 0;
 
 free_engines:
@@ -490,6 +500,7 @@ static void virtcrypto_remove(struct virtio_device *vdev)
 
        dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
 
+       flush_work(&vcrypto->config_work);
        if (virtcrypto_dev_started(vcrypto))
                virtcrypto_dev_stop(vcrypto);
        virtio_reset_device(vdev);
@@ -504,7 +515,7 @@ static void virtcrypto_config_changed(struct virtio_device *vdev)
 {
        struct virtio_crypto *vcrypto = vdev->priv;
 
-       virtcrypto_update_status(vcrypto);
+       schedule_work(&vcrypto->config_work);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -512,6 +523,7 @@ static int virtcrypto_freeze(struct virtio_device *vdev)
 {
        struct virtio_crypto *vcrypto = vdev->priv;
 
+       flush_work(&vcrypto->config_work);
        virtio_reset_device(vdev);
        virtcrypto_free_unused_reqs(vcrypto);
        if (virtcrypto_dev_started(vcrypto))
index 508eab3..a48a58e 100644 (file)
@@ -114,11 +114,11 @@ static int imx_dsp_setup_channels(struct imx_dsp_ipc *dsp_ipc)
                dsp_chan->idx = i % 2;
                dsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
                if (IS_ERR(dsp_chan->ch)) {
-                       kfree(dsp_chan->name);
                        ret = PTR_ERR(dsp_chan->ch);
                        if (ret != -EPROBE_DEFER)
                                dev_err(dev, "Failed to request mbox chan %s ret %d\n",
                                        chan_name, ret);
+                       kfree(dsp_chan->name);
                        goto out;
                }
 
index 7d6daf8..e036011 100644 (file)
@@ -1103,7 +1103,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
                if (unlikely(ret))
                        goto error;
 
-               ret = drm_exec_lock_obj(&ctx->exec, &bo->tbo.base);
+               ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
                drm_exec_retry_on_contention(&ctx->exec);
                if (unlikely(ret))
                        goto error;
index efdb1c4..d93a896 100644 (file)
@@ -65,7 +65,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
        }
 
        amdgpu_sync_create(&p->sync);
-       drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+                     DRM_EXEC_IGNORE_DUPLICATES);
        return 0;
 }
 
index aac52d9..76549c2 100644 (file)
@@ -55,6 +55,10 @@ bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
                return true;
        default:
        case AMDGPU_CTX_PRIORITY_UNSET:
+               /* UNSET priority is not valid and we don't carry that
+                * around, but set it to NORMAL in the only place this
+                * function is called, amdgpu_ctx_ioctl().
+                */
                return false;
        }
 }
@@ -95,9 +99,6 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
 static int amdgpu_ctx_priority_permit(struct drm_file *filp,
                                      int32_t priority)
 {
-       if (!amdgpu_ctx_priority_is_valid(priority))
-               return -EINVAL;
-
        /* NORMAL and below are accessible by everyone */
        if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
                return 0;
@@ -632,8 +633,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
        return 0;
 }
 
-
-
 static int amdgpu_ctx_stable_pstate(struct amdgpu_device *adev,
                                    struct amdgpu_fpriv *fpriv, uint32_t id,
                                    bool set, u32 *stable_pstate)
@@ -676,8 +675,10 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
        id = args->in.ctx_id;
        priority = args->in.priority;
 
-       /* For backwards compatibility reasons, we need to accept
-        * ioctls with garbage in the priority field */
+       /* For backwards compatibility, we need to accept ioctls with garbage
+        * in the priority field. Garbage values in the priority field result
+        * in the priority being set to NORMAL.
+        */
        if (!amdgpu_ctx_priority_is_valid(priority))
                priority = AMDGPU_CTX_PRIORITY_NORMAL;
 
index 6a8494f..fe8ba9e 100644 (file)
@@ -1124,7 +1124,7 @@ static void vi_program_aspm(struct amdgpu_device *adev)
        bool bL1SS = false;
        bool bClkReqSupport = true;
 
-       if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
+       if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_pcie_dynamic_switching_supported())
                return;
 
        if (adev->flags & AMD_IS_APU ||
index ed96cfc..8c929ef 100644 (file)
@@ -2574,14 +2574,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
        struct drm_dp_mst_branch *found_mstb;
        struct drm_dp_mst_port *port;
 
+       if (!mstb)
+               return NULL;
+
        if (memcmp(mstb->guid, guid, 16) == 0)
                return mstb;
 
 
        list_for_each_entry(port, &mstb->ports, next) {
-               if (!port->mstb)
-                       continue;
-
                found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
 
                if (found_mstb)
index 0b414ea..2c0f1f3 100644 (file)
@@ -376,9 +376,26 @@ void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
         * driver threads, but also with hardware/firmware agents.  A dedicated
         * locking register is used.
         */
-       if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
+       if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
+               /*
+                * The steering control and semaphore registers are inside an
+                * "always on" power domain with respect to RC6.  However there
+                * are some issues if higher-level platform sleep states are
+                * entering/exiting at the same time these registers are
+                * accessed.  Grabbing GT forcewake and holding it over the
+                * entire lock/steer/unlock cycle ensures that those sleep
+                * states have been fully exited before we access these
+                * registers.  This wakeref will be released in the unlock
+                * routine.
+                *
+                * This is expected to become a formally documented/numbered
+                * workaround soon.
+                */
+               intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_GT);
+
                err = wait_for(intel_uncore_read_fw(gt->uncore,
                                                    MTL_STEER_SEMAPHORE) == 0x1, 100);
+       }
 
        /*
         * Even on platforms with a hardware lock, we'll continue to grab
@@ -415,8 +432,11 @@ void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags)
 {
        spin_unlock_irqrestore(&gt->mcr_lock, flags);
 
-       if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
+       if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
                intel_uncore_write_fw(gt->uncore, MTL_STEER_SEMAPHORE, 0x1);
+
+               intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_GT);
+       }
 }
 
 /**
index 04bc1f4..59e1e21 100644 (file)
@@ -482,8 +482,7 @@ static void oa_report_id_clear(struct i915_perf_stream *stream, u32 *report)
 static bool oa_report_ctx_invalid(struct i915_perf_stream *stream, void *report)
 {
        return !(oa_report_id(stream, report) &
-              stream->perf->gen8_valid_ctx_bit) &&
-              GRAPHICS_VER(stream->perf->i915) <= 11;
+              stream->perf->gen8_valid_ctx_bit);
 }
 
 static u64 oa_timestamp(struct i915_perf_stream *stream, void *report)
@@ -5106,6 +5105,7 @@ static void i915_perf_init_info(struct drm_i915_private *i915)
                perf->gen8_valid_ctx_bit = BIT(16);
                break;
        case 12:
+               perf->gen8_valid_ctx_bit = BIT(16);
                /*
                 * Calculate offset at runtime in oa_pin_context for gen12 and
                 * cache the value in perf->ctx_oactxctrl_offset.
index d35973b..7b1076b 100644 (file)
@@ -832,9 +832,18 @@ static void i915_pmu_event_start(struct perf_event *event, int flags)
 
 static void i915_pmu_event_stop(struct perf_event *event, int flags)
 {
+       struct drm_i915_private *i915 =
+               container_of(event->pmu, typeof(*i915), pmu.base);
+       struct i915_pmu *pmu = &i915->pmu;
+
+       if (pmu->closed)
+               goto out;
+
        if (flags & PERF_EF_UPDATE)
                i915_pmu_event_read(event);
        i915_pmu_disable(event);
+
+out:
        event->hw.state = PERF_HES_STOPPED;
 }
 
index fa7a883..1df22a8 100644 (file)
@@ -5,5 +5,7 @@ config DRM_LOGICVC
        select DRM_KMS_HELPER
        select DRM_KMS_DMA_HELPER
        select DRM_GEM_DMA_HELPER
+       select REGMAP
+       select REGMAP_MMIO
        help
          DRM display driver for the logiCVC programmable logic block from Xylon
index 1efd179..b82b898 100644 (file)
@@ -678,7 +678,7 @@ ph_state(struct dchannel *dch)
 }
 
 /*
- * disable/enable BChannel for desired protocoll
+ * disable/enable BChannel for desired protocol
  */
 static int
 hfcsusb_setup_bch(struct bchannel *bch, int protocol)
index ca66b74..d7c274a 100644 (file)
@@ -294,7 +294,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
 {
        struct adin1110_priv *priv = port_priv->priv;
        u32 header_len = ADIN1110_RD_HEADER_LEN;
-       struct spi_transfer t;
+       struct spi_transfer t = {0};
        u32 frame_size_no_fcs;
        struct sk_buff *rxb;
        u32 frame_size;
index 4d4140b..f3305c4 100644 (file)
@@ -2170,7 +2170,7 @@ static void xgene_enet_shutdown(struct platform_device *pdev)
 static struct platform_driver xgene_enet_driver = {
        .driver = {
                   .name = "xgene-enet",
-                  .of_match_table = of_match_ptr(xgene_enet_of_match),
+                  .of_match_table = xgene_enet_of_match,
                   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
        },
        .probe = xgene_enet_probe,
index 8d719f8..76de553 100644 (file)
@@ -3816,6 +3816,8 @@ int t4_load_phy_fw(struct adapter *adap, int win,
                 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
        ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
                                    &param, &val, 30000);
+       if (ret)
+               return ret;
 
        /* If we have version number support, then check to see that the new
         * firmware got loaded properly.
index 6e310a5..55bb0b5 100644 (file)
@@ -580,7 +580,6 @@ struct i40e_pf {
 #define I40E_FLAG_DISABLE_FW_LLDP              BIT(24)
 #define I40E_FLAG_RS_FEC                       BIT(25)
 #define I40E_FLAG_BASE_R_FEC                   BIT(26)
-#define I40E_FLAG_VF_VLAN_PRUNING              BIT(27)
 /* TOTAL_PORT_SHUTDOWN
  * Allows to physically disable the link on the NIC's port.
  * If enabled, (after link down request from the OS)
@@ -603,6 +602,7 @@ struct i40e_pf {
  *   in abilities field of i40e_aq_set_phy_config structure
  */
 #define I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED  BIT(27)
+#define I40E_FLAG_VF_VLAN_PRUNING              BIT(28)
 
        struct i40e_client_instance *cinst;
        bool stat_offsets_loaded;
index 0b3a27f..b047c58 100644 (file)
@@ -2544,7 +2544,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
                        rx_buffer = i40e_rx_bi(rx_ring, ntp);
                        i40e_inc_ntp(rx_ring);
                        i40e_reuse_rx_page(rx_ring, rx_buffer);
-                       cleaned_count++;
+                       /* Update ntc and bump cleaned count if not in the
+                        * middle of a multi-buffer (mb) packet.
+                        */
+                       if (rx_ring->next_to_clean == ntp) {
+                               rx_ring->next_to_clean =
+                                       rx_ring->next_to_process;
+                               cleaned_count++;
+                       }
                        continue;
                }
 
@@ -2847,7 +2854,7 @@ tx_only:
                return budget;
        }
 
-       if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
+       if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR)
                q_vector->arm_wb_state = false;
 
        /* Exit the polling mode, but don't re-enable interrupts if stack might
index 37f41c8..7d991e4 100644 (file)
@@ -437,12 +437,12 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        u16 next_to_process = rx_ring->next_to_process;
        u16 next_to_clean = rx_ring->next_to_clean;
-       u16 count_mask = rx_ring->count - 1;
        unsigned int xdp_res, xdp_xmit = 0;
        struct xdp_buff *first = NULL;
+       u32 count = rx_ring->count;
        struct bpf_prog *xdp_prog;
+       u32 entries_to_alloc;
        bool failure = false;
-       u16 cleaned_count;
 
        if (next_to_process != next_to_clean)
                first = *i40e_rx_bi(rx_ring, next_to_clean);
@@ -475,7 +475,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
                                                      qword);
                        bi = *i40e_rx_bi(rx_ring, next_to_process);
                        xsk_buff_free(bi);
-                       next_to_process = (next_to_process + 1) & count_mask;
+                       if (++next_to_process == count)
+                               next_to_process = 0;
                        continue;
                }
 
@@ -493,7 +494,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
                else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
                        break;
 
-               next_to_process = (next_to_process + 1) & count_mask;
+               if (++next_to_process == count)
+                       next_to_process = 0;
 
                if (i40e_is_non_eop(rx_ring, rx_desc))
                        continue;
@@ -513,10 +515,10 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 
        rx_ring->next_to_clean = next_to_clean;
        rx_ring->next_to_process = next_to_process;
-       cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
 
-       if (cleaned_count >= I40E_RX_BUFFER_WRITE)
-               failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
+       entries_to_alloc = I40E_DESC_UNUSED(rx_ring);
+       if (entries_to_alloc >= I40E_RX_BUFFER_WRITE)
+               failure |= !i40e_alloc_rx_buffers_zc(rx_ring, entries_to_alloc);
 
        i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
        i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
@@ -752,14 +754,16 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
 
 void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
 {
-       u16 count_mask = rx_ring->count - 1;
        u16 ntc = rx_ring->next_to_clean;
        u16 ntu = rx_ring->next_to_use;
 
-       for ( ; ntc != ntu; ntc = (ntc + 1)  & count_mask) {
+       while (ntc != ntu) {
                struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);
 
                xsk_buff_free(rx_bi);
+               ntc++;
+               if (ntc >= rx_ring->count)
+                       ntc = 0;
        }
 }
 
index 6a2e6d6..b3434db 100644 (file)
@@ -1437,9 +1437,9 @@ void iavf_down(struct iavf_adapter *adapter)
                        adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
                if (!list_empty(&adapter->adv_rss_list_head))
                        adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
-               adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
        }
 
+       adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
        mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 }
 
@@ -4982,8 +4982,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        INIT_WORK(&adapter->finish_config, iavf_finish_config);
        INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
        INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
-       queue_delayed_work(adapter->wq, &adapter->watchdog_task,
-                          msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
 
        /* Setup the wait queue for indicating transition to down status */
        init_waitqueue_head(&adapter->down_waitqueue);
@@ -4994,6 +4992,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Setup the wait queue for indicating virtchannel events */
        init_waitqueue_head(&adapter->vc_waitqueue);
 
+       queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+                          msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
+       /* Initialization goes on in the work. Do not add more of it below. */
        return 0;
 
 err_ioremap:
index 319ed60..4ee8499 100644 (file)
@@ -2978,11 +2978,15 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter,
        if (err)
                goto err_out_w_lock;
 
-       igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
+       err = igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
+       if (err)
+               goto err_out_input_filter;
 
        spin_unlock(&adapter->nfc_lock);
        return 0;
 
+err_out_input_filter:
+       igb_erase_filter(adapter, input);
 err_out_w_lock:
        spin_unlock(&adapter->nfc_lock);
 err_out:
index 7ab6dd5..dd8a9d2 100644 (file)
@@ -1817,7 +1817,7 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
        struct igc_adapter *adapter = netdev_priv(netdev);
        struct net_device *dev = adapter->netdev;
        struct igc_hw *hw = &adapter->hw;
-       u32 advertising;
+       u16 advertised = 0;
 
        /* When adapter in resetting mode, autoneg/speed/duplex
         * cannot be changed
@@ -1842,18 +1842,33 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
        while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
                usleep_range(1000, 2000);
 
-       ethtool_convert_link_mode_to_legacy_u32(&advertising,
-                                               cmd->link_modes.advertising);
-       /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT.
-        * We have to check this and convert it to ADVERTISE_2500_FULL
-        * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly.
-        */
-       if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full))
-               advertising |= ADVERTISE_2500_FULL;
+       if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+                                                 2500baseT_Full))
+               advertised |= ADVERTISE_2500_FULL;
+
+       if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+                                                 1000baseT_Full))
+               advertised |= ADVERTISE_1000_FULL;
+
+       if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+                                                 100baseT_Full))
+               advertised |= ADVERTISE_100_FULL;
+
+       if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+                                                 100baseT_Half))
+               advertised |= ADVERTISE_100_HALF;
+
+       if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+                                                 10baseT_Full))
+               advertised |= ADVERTISE_10_FULL;
+
+       if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+                                                 10baseT_Half))
+               advertised |= ADVERTISE_10_HALF;
 
        if (cmd->base.autoneg == AUTONEG_ENABLE) {
                hw->mac.autoneg = 1;
-               hw->phy.autoneg_advertised = advertising;
+               hw->phy.autoneg_advertised = advertised;
                if (adapter->fc_autoneg)
                        hw->fc.requested_mode = igc_fc_default;
        } else {
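
Testing each link mode bit individually is the point of the rewrite: the legacy u32 advertising word has no bit for 2500baseT_Full, so round-tripping the ksettings bitmap through ethtool_convert_link_mode_to_legacy_u32() silently dropped 2.5G, and the old code only patched that one case by hand. A hedged, table-driven equivalent of the per-mode tests, assuming the driver's ADVERTISE_* constants are in scope:

#include <linux/ethtool.h>
#include <linux/kernel.h>

static u16 collect_advertised(const struct ethtool_link_ksettings *cmd)
{
	static const struct {
		int mode;	/* ETHTOOL_LINK_MODE_*_BIT */
		u16 flag;	/* driver ADVERTISE_* bit (igc_defines.h) */
	} map[] = {
		{ ETHTOOL_LINK_MODE_2500baseT_Full_BIT, ADVERTISE_2500_FULL },
		{ ETHTOOL_LINK_MODE_1000baseT_Full_BIT, ADVERTISE_1000_FULL },
		{ ETHTOOL_LINK_MODE_100baseT_Full_BIT,  ADVERTISE_100_FULL },
		{ ETHTOOL_LINK_MODE_100baseT_Half_BIT,  ADVERTISE_100_HALF },
		{ ETHTOOL_LINK_MODE_10baseT_Full_BIT,   ADVERTISE_10_FULL },
		{ ETHTOOL_LINK_MODE_10baseT_Half_BIT,   ADVERTISE_10_HALF },
	};
	u16 advertised = 0;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (test_bit(map[i].mode, cmd->link_modes.advertising))
			advertised |= map[i].flag;

	return advertised;
}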
index 6351a2d..361b900 100644 (file)
@@ -4364,7 +4364,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
                unsigned int entry = dirty_tx % NUM_TX_DESC;
                u32 status;
 
-               status = le32_to_cpu(tp->TxDescArray[entry].opts1);
+               status = le32_to_cpu(READ_ONCE(tp->TxDescArray[entry].opts1));
                if (status & DescOwn)
                        break;
 
@@ -4394,7 +4394,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
                 * If skb is NULL then we come here again once a tx irq is
                 * triggered after the last fragment is marked transmitted.
                 */
-               if (tp->cur_tx != dirty_tx && skb)
+               if (READ_ONCE(tp->cur_tx) != dirty_tx && skb)
                        rtl8169_doorbell(tp);
        }
 }
@@ -4427,7 +4427,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget
                dma_addr_t addr;
                u32 status;
 
-               status = le32_to_cpu(desc->opts1);
+               status = le32_to_cpu(READ_ONCE(desc->opts1));
                if (status & DescOwn)
                        break;
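
READ_ONCE() is what makes these lockless reads sound: opts1 is written by the NIC via DMA and cur_tx by the xmit path, and without the annotation the compiler may tear, refetch, or cache the loads across loop iterations. A minimal illustration with a hypothetical descriptor type:

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct desc_sketch {
	__le32 opts1;	/* written concurrently by the device via DMA */
};

/* Load the descriptor word exactly once per call; without READ_ONCE()
 * the compiler could split or repeat the 32-bit load.
 */
static bool desc_owned_by_hw(const struct desc_sketch *d, u32 own_bit)
{
	u32 status = le32_to_cpu(READ_ONCE(d->opts1));

	return status & own_bit;
}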
 
index 834f000..30ebef8 100644 (file)
@@ -629,14 +629,14 @@ static int efx_tc_flower_record_encap_match(struct efx_nic *efx,
                        }
                        if (child_ip_tos_mask != old->child_ip_tos_mask) {
                                NL_SET_ERR_MSG_FMT_MOD(extack,
-                                                      "Pseudo encap match for TOS mask %#04x conflicts with existing pseudo(MASK) entry for TOS mask %#04x",
+                                                      "Pseudo encap match for TOS mask %#04x conflicts with existing mask %#04x",
                                                       child_ip_tos_mask,
                                                       old->child_ip_tos_mask);
                                return -EEXIST;
                        }
                        if (child_udp_sport_mask != old->child_udp_sport_mask) {
                                NL_SET_ERR_MSG_FMT_MOD(extack,
-                                                      "Pseudo encap match for UDP src port mask %#x conflicts with existing pseudo(MASK) entry for mask %#x",
+                                                      "Pseudo encap match for UDP src port mask %#x conflicts with existing mask %#x",
                                                       child_udp_sport_mask,
                                                       old->child_udp_sport_mask);
                                return -EEXIST;
@@ -1081,7 +1081,7 @@ static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act,
                        /* check that we do not decrement ttl twice */
                        if (!efx_tc_flower_action_order_ok(act,
                                                           EFX_TC_AO_DEC_TTL)) {
-                               NL_SET_ERR_MSG_MOD(extack, "Unsupported: multiple dec ttl");
+                               NL_SET_ERR_MSG_MOD(extack, "multiple dec ttl is not supported");
                                return -EOPNOTSUPP;
                        }
                        act->do_ttl_dec = 1;
@@ -1106,7 +1106,7 @@ static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act,
                        /* check that we do not decrement hoplimit twice */
                        if (!efx_tc_flower_action_order_ok(act,
                                                           EFX_TC_AO_DEC_TTL)) {
-                               NL_SET_ERR_MSG_MOD(extack, "Unsupported: multiple dec ttl");
+                               NL_SET_ERR_MSG_MOD(extack, "multiple dec ttl is not supported");
                                return -EOPNOTSUPP;
                        }
                        act->do_ttl_dec = 1;
@@ -1120,7 +1120,7 @@ static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act,
        }
 
        NL_SET_ERR_MSG_FMT_MOD(extack,
-                              "Unsupported: ttl add action type %x %x %x/%x",
+                              "ttl add action type %x %x %x/%x is not supported",
                               fa->mangle.htype, fa->mangle.offset,
                               fa->mangle.val, fa->mangle.mask);
        return -EOPNOTSUPP;
@@ -1164,7 +1164,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
                case 0:
                        if (fa->mangle.mask) {
                                NL_SET_ERR_MSG_FMT_MOD(extack,
-                                                      "Unsupported: mask (%#x) of eth.dst32 mangle",
+                                                      "mask (%#x) of eth.dst32 mangle is not supported",
                                                       fa->mangle.mask);
                                return -EOPNOTSUPP;
                        }
@@ -1184,7 +1184,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
                                mung->dst_mac_16 = 1;
                        } else {
                                NL_SET_ERR_MSG_FMT_MOD(extack,
-                                                      "Unsupported: mask (%#x) of eth+4 mangle is not high or low 16b",
+                                                      "mask (%#x) of eth+4 mangle is not high or low 16b",
                                                       fa->mangle.mask);
                                return -EOPNOTSUPP;
                        }
@@ -1192,7 +1192,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
                case 8:
                        if (fa->mangle.mask) {
                                NL_SET_ERR_MSG_FMT_MOD(extack,
-                                                      "Unsupported: mask (%#x) of eth.src32 mangle",
+                                                      "mask (%#x) of eth.src32 mangle is not supported",
                                                       fa->mangle.mask);
                                return -EOPNOTSUPP;
                        }
@@ -1201,7 +1201,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
                        mung->src_mac_32 = 1;
                        return efx_tc_complete_mac_mangle(efx, act, mung, extack);
                default:
-                       NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported: mangle eth+%u %x/%x",
+                       NL_SET_ERR_MSG_FMT_MOD(extack, "mangle eth+%u %x/%x is not supported",
                                               fa->mangle.offset, fa->mangle.val, fa->mangle.mask);
                        return -EOPNOTSUPP;
                }
@@ -1217,7 +1217,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
                        /* check that pedit applies to ttl only */
                        if (fa->mangle.mask != ~EFX_TC_HDR_TYPE_TTL_MASK) {
                                NL_SET_ERR_MSG_FMT_MOD(extack,
-                                                      "Unsupported: mask (%#x) out of range, only support mangle action on ipv4.ttl",
+                                                      "mask (%#x) out of range, only support mangle action on ipv4.ttl",
                                                       fa->mangle.mask);
                                return -EOPNOTSUPP;
                        }
@@ -1227,7 +1227,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
                         */
                        if (match->mask.ip_ttl != U8_MAX) {
                                NL_SET_ERR_MSG_FMT_MOD(extack,
-                                                      "Unsupported: only support mangle ipv4.ttl when we have an exact match on ttl, mask used for match (%#x)",
+                                                      "only support mangle ttl when we have an exact match, current mask (%#x)",
                                                       match->mask.ip_ttl);
                                return -EOPNOTSUPP;
                        }
@@ -1237,7 +1237,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
                         */
                        if (match->value.ip_ttl == 0) {
                                NL_SET_ERR_MSG_MOD(extack,
-                                                  "Unsupported: we cannot decrement ttl past 0");
+                                                  "decrementing ttl past 0 is not supported");
                                return -EOPNOTSUPP;
                        }
 
@@ -1245,7 +1245,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
                        if (!efx_tc_flower_action_order_ok(act,
                                                           EFX_TC_AO_DEC_TTL)) {
                                NL_SET_ERR_MSG_MOD(extack,
-                                                  "Unsupported: multiple dec ttl");
+                                                  "multiple dec ttl is not supported");
                                return -EOPNOTSUPP;
                        }
 
@@ -1259,7 +1259,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
                        fallthrough;
                default:
                        NL_SET_ERR_MSG_FMT_MOD(extack,
-                                              "Unsupported: only support mangle on the ttl field (offset is %u)",
+                                              "only support mangle on the ttl field (offset is %u)",
                                               fa->mangle.offset);
                        return -EOPNOTSUPP;
                }
@@ -1275,7 +1275,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
                        /* check that pedit applies to ttl only */
                        if (fa->mangle.mask != EFX_TC_HDR_TYPE_HLIMIT_MASK) {
                                NL_SET_ERR_MSG_FMT_MOD(extack,
-                                                      "Unsupported: mask (%#x) out of range, only support mangle action on ipv6.hop_limit",
+                                                      "mask (%#x) out of range, only support mangle action on ipv6.hop_limit",
                                                       fa->mangle.mask);
 
                                return -EOPNOTSUPP;
@@ -1286,7 +1286,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
                         */
                        if (match->mask.ip_ttl != U8_MAX) {
                                NL_SET_ERR_MSG_FMT_MOD(extack,
-                                                      "Unsupported: only support mangle ipv6.hop_limit when we have an exact match on ttl, mask used for match (%#x)",
+                                                      "only support mangle hop_limit when we have an exact match, current mask (%#x)",
                                                       match->mask.ip_ttl);
                                return -EOPNOTSUPP;
                        }
@@ -1296,7 +1296,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
                         */
                        if (match->value.ip_ttl == 0) {
                                NL_SET_ERR_MSG_MOD(extack,
-                                                  "Unsupported: we cannot decrement hop_limit past 0");
+                                                  "decrementing hop_limit past 0 is not supported");
                                return -EOPNOTSUPP;
                        }
 
@@ -1304,7 +1304,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
                        if (!efx_tc_flower_action_order_ok(act,
                                                           EFX_TC_AO_DEC_TTL)) {
                                NL_SET_ERR_MSG_MOD(extack,
-                                                  "Unsupported: multiple dec ttl");
+                                                  "multiple dec ttl is not supported");
                                return -EOPNOTSUPP;
                        }
 
@@ -1318,7 +1318,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
                        fallthrough;
                default:
                        NL_SET_ERR_MSG_FMT_MOD(extack,
-                                              "Unsupported: only support mangle on the hop_limit field");
+                                              "only support mangle on the hop_limit field");
                        return -EOPNOTSUPP;
                }
        default:
index ed1a5a3..5801f4d 100644 (file)
@@ -1197,6 +1197,17 @@ static int stmmac_init_phy(struct net_device *dev)
        return ret;
 }
 
+static void stmmac_set_half_duplex(struct stmmac_priv *priv)
+{
+       /* Half-duplex can only work with a single tx queue */
+       if (priv->plat->tx_queues_to_use > 1)
+               priv->phylink_config.mac_capabilities &=
+                       ~(MAC_10HD | MAC_100HD | MAC_1000HD);
+       else
+               priv->phylink_config.mac_capabilities |=
+                       (MAC_10HD | MAC_100HD | MAC_1000HD);
+}
+
 static int stmmac_phy_setup(struct stmmac_priv *priv)
 {
        struct stmmac_mdio_bus_data *mdio_bus_data;
@@ -1228,10 +1239,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
                                                MAC_10FD | MAC_100FD |
                                                MAC_1000FD;
 
-       /* Half-Duplex can only work with single queue */
-       if (priv->plat->tx_queues_to_use <= 1)
-               priv->phylink_config.mac_capabilities |= MAC_10HD | MAC_100HD |
-                                                        MAC_1000HD;
+       stmmac_set_half_duplex(priv);
 
        /* Get the MAC specific capabilities */
        stmmac_mac_phylink_get_caps(priv);
@@ -7172,6 +7180,7 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
                        priv->rss.table[i] = ethtool_rxfh_indir_default(i,
                                                                        rx_cnt);
 
+       stmmac_set_half_duplex(priv);
        stmmac_napi_add(dev);
 
        if (netif_running(dev))
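
Factoring the rule into stmmac_set_half_duplex() lets it run both at phylink setup and when the queue count changes at runtime. For example, a device brought up with one tx queue advertises the half-duplex modes; after something like `ethtool -L eth0 tx 4`, stmmac_reinit_queues() now clears those bits again, where the old one-shot code in stmmac_phy_setup() would have left stale half-duplex capabilities in place and could never restore them when the count dropped back to one.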
index dc14a66..44488c1 100644 (file)
@@ -1217,7 +1217,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
                key_index = wl->current_key;
 
        if (!enc->length && (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) {
-               /* reques to change default key index */
+               /* request to change default key index */
                pr_debug("%s: request to change default key to %d\n",
                         __func__, key_index);
                wl->current_key = key_index;
index 144ec62..b22596b 100644 (file)
@@ -872,8 +872,9 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
 
        skb_dst_update_pmtu_no_confirm(skb, mtu);
 
-       if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
-           mtu < ntohs(iph->tot_len)) {
+       if (iph->frag_off & htons(IP_DF) &&
+           ((!skb_is_gso(skb) && skb->len > mtu) ||
+            (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) {
                netdev_dbg(dev, "packet too big, fragmentation needed\n");
                icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                              htonl(mtu));
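
The rewritten test handles GSO correctly: a non-GSO skb can be compared against the PMTU by raw length, but a GSO skb is longer than the wire MTU by design, so what matters is whether its resulting segments fit, which is exactly what skb_gso_validate_network_len() checks. The combined condition, as a minimal sketch:

#include <linux/ip.h>
#include <linux/skbuff.h>

/* True when an IPv4 packet with DF set cannot fit the path MTU and
 * therefore needs an ICMP "fragmentation needed" reply.
 */
static bool df_packet_exceeds_mtu(struct sk_buff *skb,
				  const struct iphdr *iph, u32 mtu)
{
	if (!(iph->frag_off & htons(IP_DF)))
		return false;

	if (skb_is_gso(skb))
		/* validate post-segmentation length, not skb->len */
		return !skb_gso_validate_network_len(skb, mtu);

	return skb->len > mtu;
}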
index a03490b..cc7ddc4 100644 (file)
@@ -1162,9 +1162,10 @@ static int adf7242_stats_show(struct seq_file *file, void *offset)
 
 static void adf7242_debugfs_init(struct adf7242_local *lp)
 {
-       char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "adf7242-";
+       char debugfs_dir_name[DNAME_INLINE_LEN + 1];
 
-       strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN);
+       snprintf(debugfs_dir_name, sizeof(debugfs_dir_name),
+                "adf7242-%s", dev_name(&lp->spi->dev));
 
        lp->debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
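
The replacement fixes a classic strncat() misuse: its size argument bounds how many bytes are appended, not the total buffer size, so appending up to DNAME_INLINE_LEN bytes after the "adf7242-" prefix could run past the end of the array. snprintf() bounds the whole result and always terminates. A self-contained userspace illustration of the difference:

#include <stdio.h>

#define BUF_LEN 16

int main(void)
{
	char buf[BUF_LEN];
	const char *name = "spi0.0-with-a-long-name";

	/* BUG pattern (do not do this): the limit is bytes appended,
	 * not total size, so it can overflow a prefixed buffer:
	 *	strncat(buf, name, BUF_LEN);
	 */

	/* snprintf() bounds prefix plus suffix together. */
	snprintf(buf, sizeof(buf), "adf7242-%s", name);
	printf("%s\n", buf);	/* safely truncated if too long */
	return 0;
}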
 
index 0c13d99..afb20c0 100644 (file)
@@ -764,7 +764,7 @@ enum rtl_register_content {
 
 /* rtl8152 flags */
 enum rtl8152_flags {
-       RTL8152_UNPLUG = 0,
+       RTL8152_INACCESSIBLE = 0,
        RTL8152_SET_RX_MODE,
        WORK_ENABLE,
        RTL8152_LINK_CHG,
@@ -773,6 +773,9 @@ enum rtl8152_flags {
        SCHEDULE_TASKLET,
        GREEN_ETHERNET,
        RX_EPROTO,
+       IN_PRE_RESET,
+       PROBED_WITH_NO_ERRORS,
+       PROBE_SHOULD_RETRY,
 };
 
 #define DEVICE_ID_LENOVO_USB_C_TRAVEL_HUB              0x721e
@@ -953,6 +956,8 @@ struct r8152 {
        u8 version;
        u8 duplex;
        u8 autoneg;
+
+       unsigned int reg_access_reset_count;
 };
 
 /**
@@ -1200,6 +1205,96 @@ static unsigned int agg_buf_sz = 16384;
 
 #define RTL_LIMITED_TSO_SIZE   (size_to_mtu(agg_buf_sz) - sizeof(struct tx_desc))
 
+/* If register access fails then we block access and issue a reset. If this
+ * happens too many times in a row without a successful access then we stop
+ * trying to reset and just leave access blocked.
+ */
+#define REGISTER_ACCESS_MAX_RESETS     3
+
+static void rtl_set_inaccessible(struct r8152 *tp)
+{
+       set_bit(RTL8152_INACCESSIBLE, &tp->flags);
+       smp_mb__after_atomic();
+}
+
+static void rtl_set_accessible(struct r8152 *tp)
+{
+       clear_bit(RTL8152_INACCESSIBLE, &tp->flags);
+       smp_mb__after_atomic();
+}
+
+static
+int r8152_control_msg(struct r8152 *tp, unsigned int pipe, __u8 request,
+                     __u8 requesttype, __u16 value, __u16 index, void *data,
+                     __u16 size, const char *msg_tag)
+{
+       struct usb_device *udev = tp->udev;
+       int ret;
+
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+               return -ENODEV;
+
+       ret = usb_control_msg(udev, pipe, request, requesttype,
+                             value, index, data, size,
+                             USB_CTRL_GET_TIMEOUT);
+
+       /* No need to issue a reset to report an error if the USB device got
+        * unplugged; just return immediately.
+        */
+       if (ret == -ENODEV)
+               return ret;
+
+       /* If the write was successful then we're done */
+       if (ret >= 0) {
+               tp->reg_access_reset_count = 0;
+               return ret;
+       }
+
+       dev_err(&udev->dev,
+               "Failed to %s %d bytes at %#06x/%#06x (%d)\n",
+               msg_tag, size, value, index, ret);
+
+       /* Block all future register access until we reset. Much of the code
+        * in the driver doesn't check for errors. Notably, many parts of the
+        * driver do a read/modify/write of a register value without
+        * confirming that the read succeeded. Writing back modified garbage
+        * like this can fully wedge the adapter, requiring a power cycle.
+        */
+       rtl_set_inaccessible(tp);
+
+       /* If probe hasn't yet finished, then we'll request a retry of the
+        * whole probe routine if we get any control transfer errors. We
+        * never have to clear this bit since we free/reallocate the whole "tp"
+        * structure if we retry probe.
+        */
+       if (!test_bit(PROBED_WITH_NO_ERRORS, &tp->flags)) {
+               set_bit(PROBE_SHOULD_RETRY, &tp->flags);
+               return ret;
+       }
+
+       /* Failing to access registers in pre-reset is not surprising since we
+        * wouldn't be resetting if things were behaving normally. The register
+        * access we do in pre-reset isn't truly mandatory--we're just reusing
+        * the disable() function and trying to be nice by powering the
+        * adapter down before resetting it. Thus, if we're in pre-reset,
+        * we'll return right away and not try to queue up yet another reset.
+        * We know the post-reset is already coming.
+        */
+       if (test_bit(IN_PRE_RESET, &tp->flags))
+               return ret;
+
+       if (tp->reg_access_reset_count < REGISTER_ACCESS_MAX_RESETS) {
+               usb_queue_reset_device(tp->intf);
+               tp->reg_access_reset_count++;
+       } else if (tp->reg_access_reset_count == REGISTER_ACCESS_MAX_RESETS) {
+               dev_err(&udev->dev,
+                       "Tried to reset %d times; giving up.\n",
+                       REGISTER_ACCESS_MAX_RESETS);
+       }
+
+       return ret;
+}
+
 static
 int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
 {
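
The wrapper centralizes the policy described in the comment block above it: the first failed transfer latches RTL8152_INACCESSIBLE so later read/modify/write sequences cannot push garbage back into the adapter, a success re-arms the budget, and only a bounded number of recovery resets is attempted. The same latch-and-bounded-reset shape, reduced to a hedged sketch with hypothetical names:

#include <linux/errno.h>
#include <linux/types.h>

#define MAX_RESETS 3	/* mirrors REGISTER_ACCESS_MAX_RESETS */

struct dev_state_sketch {
	bool inaccessible;		/* latched on first failure */
	unsigned int reset_count;	/* recovery resets attempted */
};

static int guarded_access(struct dev_state_sketch *st, int (*op)(void))
{
	int ret;

	if (st->inaccessible)
		return -ENODEV;		/* fail fast once latched */

	ret = op();
	if (ret >= 0) {
		st->reset_count = 0;	/* success re-arms the budget */
		return ret;
	}

	st->inaccessible = true;	/* block further register access */

	if (st->reset_count < MAX_RESETS) {
		st->reset_count++;
		/* a device reset would be queued here */
	}

	return ret;
}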
@@ -1210,9 +1305,10 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
        if (!tmp)
                return -ENOMEM;
 
-       ret = usb_control_msg(tp->udev, tp->pipe_ctrl_in,
-                             RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
-                             value, index, tmp, size, 500);
+       ret = r8152_control_msg(tp, tp->pipe_ctrl_in,
+                               RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
+                               value, index, tmp, size, "read");
+
        if (ret < 0)
                memset(data, 0xff, size);
        else
@@ -1233,9 +1329,9 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
        if (!tmp)
                return -ENOMEM;
 
-       ret = usb_control_msg(tp->udev, tp->pipe_ctrl_out,
-                             RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
-                             value, index, tmp, size, 500);
+       ret = r8152_control_msg(tp, tp->pipe_ctrl_out,
+                               RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
+                               value, index, tmp, size, "write");
 
        kfree(tmp);
 
@@ -1244,10 +1340,8 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
 
 static void rtl_set_unplug(struct r8152 *tp)
 {
-       if (tp->udev->state == USB_STATE_NOTATTACHED) {
-               set_bit(RTL8152_UNPLUG, &tp->flags);
-               smp_mb__after_atomic();
-       }
+       if (tp->udev->state == USB_STATE_NOTATTACHED)
+               rtl_set_inaccessible(tp);
 }
 
 static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
@@ -1256,7 +1350,7 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
        u16 limit = 64;
        int ret = 0;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return -ENODEV;
 
        /* both size and index must be 4-byte aligned */
@@ -1300,7 +1394,7 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
        u16 byteen_start, byteen_end, byen;
        u16 limit = 512;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return -ENODEV;
 
        /* both size and index must be 4-byte aligned */
@@ -1537,7 +1631,7 @@ static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
        struct r8152 *tp = netdev_priv(netdev);
        int ret;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return -ENODEV;
 
        if (phy_id != R8152_PHY_ID)
@@ -1553,7 +1647,7 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
 {
        struct r8152 *tp = netdev_priv(netdev);
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return;
 
        if (phy_id != R8152_PHY_ID)
@@ -1758,7 +1852,7 @@ static void read_bulk_callback(struct urb *urb)
        if (!tp)
                return;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return;
 
        if (!test_bit(WORK_ENABLE, &tp->flags))
@@ -1850,7 +1944,7 @@ static void write_bulk_callback(struct urb *urb)
        if (!test_bit(WORK_ENABLE, &tp->flags))
                return;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return;
 
        if (!skb_queue_empty(&tp->tx_queue))
@@ -1871,7 +1965,7 @@ static void intr_callback(struct urb *urb)
        if (!test_bit(WORK_ENABLE, &tp->flags))
                return;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return;
 
        switch (status) {
@@ -2615,7 +2709,7 @@ static void bottom_half(struct tasklet_struct *t)
 {
        struct r8152 *tp = from_tasklet(tp, t, tx_tl);
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return;
 
        if (!test_bit(WORK_ENABLE, &tp->flags))
@@ -2658,7 +2752,7 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
        int ret;
 
        /* The rx would be stopped, so skip submitting */
-       if (test_bit(RTL8152_UNPLUG, &tp->flags) ||
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) ||
            !test_bit(WORK_ENABLE, &tp->flags) || !netif_carrier_ok(tp->netdev))
                return 0;
 
@@ -3058,7 +3152,7 @@ static int rtl_enable(struct r8152 *tp)
 
 static int rtl8152_enable(struct r8152 *tp)
 {
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return -ENODEV;
 
        set_tx_qlen(tp);
@@ -3145,7 +3239,7 @@ static int rtl8153_enable(struct r8152 *tp)
 {
        u32 ocp_data;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return -ENODEV;
 
        set_tx_qlen(tp);
@@ -3177,7 +3271,7 @@ static void rtl_disable(struct r8152 *tp)
        u32 ocp_data;
        int i;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
                rtl_drop_queued_tx(tp);
                return;
        }
@@ -3631,7 +3725,7 @@ static u16 r8153_phy_status(struct r8152 *tp, u16 desired)
                }
 
                msleep(20);
-               if (test_bit(RTL8152_UNPLUG, &tp->flags))
+               if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                        break;
        }
 
@@ -3663,6 +3757,8 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable)
                        int i;
 
                        for (i = 0; i < 500; i++) {
+                               if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+                                       return;
                                if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
                                    AUTOLOAD_DONE)
                                        break;
@@ -3703,6 +3799,8 @@ static void r8153c_ups_en(struct r8152 *tp, bool enable)
                        int i;
 
                        for (i = 0; i < 500; i++) {
+                               if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+                                       return;
                                if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
                                    AUTOLOAD_DONE)
                                        break;
@@ -4046,6 +4144,9 @@ static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait)
        for (i = 0; wait && i < 5000; i++) {
                u32 ocp_data;
 
+               if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+                       return -ENODEV;
+
                usleep_range(1000, 2000);
                ocp_data = ocp_reg_read(tp, OCP_PHY_PATCH_STAT);
                if ((ocp_data & PATCH_READY) ^ check)
@@ -6002,7 +6103,7 @@ static int rtl8156_enable(struct r8152 *tp)
        u32 ocp_data;
        u16 speed;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return -ENODEV;
 
        r8156_fc_parameter(tp);
@@ -6060,7 +6161,7 @@ static int rtl8156b_enable(struct r8152 *tp)
        u32 ocp_data;
        u16 speed;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return -ENODEV;
 
        set_tx_qlen(tp);
@@ -6246,7 +6347,7 @@ out:
 
 static void rtl8152_up(struct r8152 *tp)
 {
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return;
 
        r8152_aldps_en(tp, false);
@@ -6256,7 +6357,7 @@ static void rtl8152_up(struct r8152 *tp)
 
 static void rtl8152_down(struct r8152 *tp)
 {
-       if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
                rtl_drop_queued_tx(tp);
                return;
        }
@@ -6271,7 +6372,7 @@ static void rtl8153_up(struct r8152 *tp)
 {
        u32 ocp_data;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return;
 
        r8153_u1u2en(tp, false);
@@ -6311,7 +6412,7 @@ static void rtl8153_down(struct r8152 *tp)
 {
        u32 ocp_data;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
                rtl_drop_queued_tx(tp);
                return;
        }
@@ -6332,7 +6433,7 @@ static void rtl8153b_up(struct r8152 *tp)
 {
        u32 ocp_data;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return;
 
        r8153b_u1u2en(tp, false);
@@ -6356,7 +6457,7 @@ static void rtl8153b_down(struct r8152 *tp)
 {
        u32 ocp_data;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
                rtl_drop_queued_tx(tp);
                return;
        }
@@ -6393,7 +6494,7 @@ static void rtl8153c_up(struct r8152 *tp)
 {
        u32 ocp_data;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return;
 
        r8153b_u1u2en(tp, false);
@@ -6474,7 +6575,7 @@ static void rtl8156_up(struct r8152 *tp)
 {
        u32 ocp_data;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return;
 
        r8153b_u1u2en(tp, false);
@@ -6547,7 +6648,7 @@ static void rtl8156_down(struct r8152 *tp)
 {
        u32 ocp_data;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
                rtl_drop_queued_tx(tp);
                return;
        }
@@ -6685,7 +6786,7 @@ static void rtl_work_func_t(struct work_struct *work)
        /* If the device is unplugged or !netif_running(), the workqueue
         * doesn't need to wake the device, and could return directly.
         */
-       if (test_bit(RTL8152_UNPLUG, &tp->flags) || !netif_running(tp->netdev))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) || !netif_running(tp->netdev))
                return;
 
        if (usb_autopm_get_interface(tp->intf) < 0)
@@ -6724,7 +6825,7 @@ static void rtl_hw_phy_work_func_t(struct work_struct *work)
 {
        struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work);
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return;
 
        if (usb_autopm_get_interface(tp->intf) < 0)
@@ -6851,7 +6952,7 @@ static int rtl8152_close(struct net_device *netdev)
        netif_stop_queue(netdev);
 
        res = usb_autopm_get_interface(tp->intf);
-       if (res < 0 || test_bit(RTL8152_UNPLUG, &tp->flags)) {
+       if (res < 0 || test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
                rtl_drop_queued_tx(tp);
                rtl_stop_rx(tp);
        } else {
@@ -6884,7 +6985,7 @@ static void r8152b_init(struct r8152 *tp)
        u32 ocp_data;
        u16 data;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return;
 
        data = r8152_mdio_read(tp, MII_BMCR);
@@ -6928,7 +7029,7 @@ static void r8153_init(struct r8152 *tp)
        u16 data;
        int i;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return;
 
        r8153_u1u2en(tp, false);
@@ -6939,7 +7040,7 @@ static void r8153_init(struct r8152 *tp)
                        break;
 
                msleep(20);
-               if (test_bit(RTL8152_UNPLUG, &tp->flags))
+               if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                        break;
        }
 
@@ -7068,7 +7169,7 @@ static void r8153b_init(struct r8152 *tp)
        u16 data;
        int i;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return;
 
        r8153b_u1u2en(tp, false);
@@ -7079,7 +7180,7 @@ static void r8153b_init(struct r8152 *tp)
                        break;
 
                msleep(20);
-               if (test_bit(RTL8152_UNPLUG, &tp->flags))
+               if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                        break;
        }
 
@@ -7150,7 +7251,7 @@ static void r8153c_init(struct r8152 *tp)
        u16 data;
        int i;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return;
 
        r8153b_u1u2en(tp, false);
@@ -7170,7 +7271,7 @@ static void r8153c_init(struct r8152 *tp)
                        break;
 
                msleep(20);
-               if (test_bit(RTL8152_UNPLUG, &tp->flags))
+               if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                        return;
        }
 
@@ -7999,7 +8100,7 @@ static void r8156_init(struct r8152 *tp)
        u16 data;
        int i;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return;
 
        ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP);
@@ -8020,7 +8121,7 @@ static void r8156_init(struct r8152 *tp)
                        break;
 
                msleep(20);
-               if (test_bit(RTL8152_UNPLUG, &tp->flags))
+               if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                        return;
        }
 
@@ -8095,7 +8196,7 @@ static void r8156b_init(struct r8152 *tp)
        u16 data;
        int i;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return;
 
        ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP);
@@ -8129,7 +8230,7 @@ static void r8156b_init(struct r8152 *tp)
                        break;
 
                msleep(20);
-               if (test_bit(RTL8152_UNPLUG, &tp->flags))
+               if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                        return;
        }
 
@@ -8255,7 +8356,7 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
        struct r8152 *tp = usb_get_intfdata(intf);
        struct net_device *netdev;
 
-       if (!tp)
+       if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
                return 0;
 
        netdev = tp->netdev;
@@ -8270,7 +8371,9 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
        napi_disable(&tp->napi);
        if (netif_carrier_ok(netdev)) {
                mutex_lock(&tp->control);
+               set_bit(IN_PRE_RESET, &tp->flags);
                tp->rtl_ops.disable(tp);
+               clear_bit(IN_PRE_RESET, &tp->flags);
                mutex_unlock(&tp->control);
        }
 
@@ -8283,9 +8386,11 @@ static int rtl8152_post_reset(struct usb_interface *intf)
        struct net_device *netdev;
        struct sockaddr sa;
 
-       if (!tp)
+       if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
                return 0;
 
+       rtl_set_accessible(tp);
+
        /* reset the MAC address in case of policy change */
        if (determine_ethernet_addr(tp, &sa) >= 0) {
                rtnl_lock();
@@ -9158,7 +9263,7 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
        struct mii_ioctl_data *data = if_mii(rq);
        int res;
 
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return -ENODEV;
 
        res = usb_autopm_get_interface(tp->intf);
@@ -9260,7 +9365,7 @@ static const struct net_device_ops rtl8152_netdev_ops = {
 
 static void rtl8152_unload(struct r8152 *tp)
 {
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return;
 
        if (tp->version != RTL_VER_01)
@@ -9269,7 +9374,7 @@ static void rtl8152_unload(struct r8152 *tp)
 
 static void rtl8153_unload(struct r8152 *tp)
 {
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return;
 
        r8153_power_cut_en(tp, false);
@@ -9277,7 +9382,7 @@ static void rtl8153_unload(struct r8152 *tp)
 
 static void rtl8153b_unload(struct r8152 *tp)
 {
-       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+       if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
                return;
 
        r8153b_power_cut_en(tp, false);
@@ -9487,16 +9592,29 @@ static u8 __rtl_get_hw_ver(struct usb_device *udev)
        __le32 *tmp;
        u8 version;
        int ret;
+       int i;
 
        tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
        if (!tmp)
                return 0;
 
-       ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
-                             RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
-                             PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
-       if (ret > 0)
-               ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK;
+       /* Retry up to 3 times in case there is a transitory error. We do this
+        * since retrying a read of the version is always safe and this
+        * function doesn't take advantage of r8152_control_msg().
+        */
+       for (i = 0; i < 3; i++) {
+               ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+                                     RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
+                                     PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp),
+                                     USB_CTRL_GET_TIMEOUT);
+               if (ret > 0) {
+                       ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK;
+                       break;
+               }
+       }
+
+       if (i != 0 && ret > 0)
+               dev_warn(&udev->dev, "Needed %d retries to read version\n", i);
 
        kfree(tmp);
 
@@ -9595,25 +9713,14 @@ static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev)
        return 0;
 }
 
-static int rtl8152_probe(struct usb_interface *intf,
-                        const struct usb_device_id *id)
+static int rtl8152_probe_once(struct usb_interface *intf,
+                             const struct usb_device_id *id, u8 version)
 {
        struct usb_device *udev = interface_to_usbdev(intf);
        struct r8152 *tp;
        struct net_device *netdev;
-       u8 version;
        int ret;
 
-       if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
-               return -ENODEV;
-
-       if (!rtl_check_vendor_ok(intf))
-               return -ENODEV;
-
-       version = rtl8152_get_version(intf);
-       if (version == RTL_VER_UNKNOWN)
-               return -ENODEV;
-
        usb_reset_device(udev);
        netdev = alloc_etherdev(sizeof(struct r8152));
        if (!netdev) {
@@ -9776,18 +9883,68 @@ static int rtl8152_probe(struct usb_interface *intf,
        else
                device_set_wakeup_enable(&udev->dev, false);
 
+       /* If we saw a control transfer error while probing then we may
+        * want to try probe() again. Consider this an error.
+        */
+       if (test_bit(PROBE_SHOULD_RETRY, &tp->flags))
+               goto out2;
+
+       set_bit(PROBED_WITH_NO_ERRORS, &tp->flags);
        netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
 
        return 0;
 
+out2:
+       unregister_netdev(netdev);
+
 out1:
        tasklet_kill(&tp->tx_tl);
+       cancel_delayed_work_sync(&tp->hw_phy_work);
+       if (tp->rtl_ops.unload)
+               tp->rtl_ops.unload(tp);
+       rtl8152_release_firmware(tp);
        usb_set_intfdata(intf, NULL);
 out:
+       if (test_bit(PROBE_SHOULD_RETRY, &tp->flags))
+               ret = -EAGAIN;
+
        free_netdev(netdev);
        return ret;
 }
 
+#define RTL8152_PROBE_TRIES    3
+
+static int rtl8152_probe(struct usb_interface *intf,
+                        const struct usb_device_id *id)
+{
+       u8 version;
+       int ret;
+       int i;
+
+       if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
+               return -ENODEV;
+
+       if (!rtl_check_vendor_ok(intf))
+               return -ENODEV;
+
+       version = rtl8152_get_version(intf);
+       if (version == RTL_VER_UNKNOWN)
+               return -ENODEV;
+
+       for (i = 0; i < RTL8152_PROBE_TRIES; i++) {
+               ret = rtl8152_probe_once(intf, id, version);
+               if (ret != -EAGAIN)
+                       break;
+       }
+       if (ret == -EAGAIN) {
+               dev_err(&intf->dev,
+                       "r8152 failed probe after %d tries; giving up\n", i);
+               return -ENODEV;
+       }
+
+       return ret;
+}
+
 static void rtl8152_disconnect(struct usb_interface *intf)
 {
        struct r8152 *tp = usb_get_intfdata(intf);
index 17da42f..a530f20 100644 (file)
@@ -95,7 +95,9 @@ static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
        ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
                 | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                 0, index, &buf, 4);
-       if (ret < 0) {
+       if (ret < 4) {
+               ret = ret < 0 ? ret : -ENODATA;
+
                if (ret != -ENODEV)
                        netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
                                    index, ret);
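
Checking ret < 4 instead of ret < 0 closes a subtle hole: usb_control_msg() can return a short positive byte count, and treating any non-negative return as success would let the caller consume a partially filled register value. Mapping the short read to -ENODATA keeps the error contract intact. The check in isolation, as a small sketch:

#include <linux/errno.h>

/* Hypothetical helper: treat a short USB control read as an error. */
static int check_read_len(int ret, int expected)
{
	if (ret < expected)
		return ret < 0 ? ret : -ENODATA;	/* short read */
	return 0;
}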
index 12040ce..acc812e 100644 (file)
@@ -334,12 +334,14 @@ if RISCV
 config ARCH_R9A07G043
        bool "RISC-V Platform support for RZ/Five"
        depends on NONPORTABLE
+       depends on RISCV_ALTERNATIVE
+       depends on !RISCV_ISA_ZICBOM
+       depends on RISCV_SBI
        select ARCH_RZG2L
-       select AX45MP_L2_CACHE if RISCV_DMA_NONCOHERENT
+       select AX45MP_L2_CACHE
        select DMA_GLOBAL_POOL
-       select ERRATA_ANDES if RISCV_SBI
-       select ERRATA_ANDES_CMO if ERRATA_ANDES
-
+       select ERRATA_ANDES
+       select ERRATA_ANDES_CMO
        help
          This enables support for the Renesas RZ/Five SoC.
 
index 60d6ac6..9c85162 100644 (file)
@@ -146,7 +146,8 @@ void mlx5_vdpa_add_debugfs(struct mlx5_vdpa_net *ndev)
                ndev->rx_dent = debugfs_create_dir("rx", ndev->debugfs);
 }
 
-void mlx5_vdpa_remove_debugfs(struct dentry *dbg)
+void mlx5_vdpa_remove_debugfs(struct mlx5_vdpa_net *ndev)
 {
-       debugfs_remove_recursive(dbg);
+       debugfs_remove_recursive(ndev->debugfs);
+       ndev->debugfs = NULL;
 }
index 40a03b0..946488b 100644 (file)
@@ -625,30 +625,70 @@ static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx)
        mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
 }
 
+static int read_umem_params(struct mlx5_vdpa_net *ndev)
+{
+       u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
+       u16 opmod = (MLX5_CAP_VDPA_EMULATION << 1) | (HCA_CAP_OPMOD_GET_CUR & 0x01);
+       struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
+       int out_size;
+       void *caps;
+       void *out;
+       int err;
+
+       out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+       out = kzalloc(out_size, GFP_KERNEL);
+       if (!out)
+               return -ENOMEM;
+
+       MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+       MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
+       err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
+       if (err) {
+               mlx5_vdpa_warn(&ndev->mvdev,
+                       "Failed reading vdpa umem capabilities with err %d\n", err);
+               goto out;
+       }
+
+       caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+
+       ndev->umem_1_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_a);
+       ndev->umem_1_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_b);
+
+       ndev->umem_2_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_a);
+       ndev->umem_2_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_b);
+
+       ndev->umem_3_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_a);
+       ndev->umem_3_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_b);
+
+out:
+       kfree(out);
+       return err;
+}
+
 static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
                          struct mlx5_vdpa_umem **umemp)
 {
-       struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
-       int p_a;
-       int p_b;
+       u32 p_a;
+       u32 p_b;
 
        switch (num) {
        case 1:
-               p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_a);
-               p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_b);
+               p_a = ndev->umem_1_buffer_param_a;
+               p_b = ndev->umem_1_buffer_param_b;
                *umemp = &mvq->umem1;
                break;
        case 2:
-               p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_a);
-               p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_b);
+               p_a = ndev->umem_2_buffer_param_a;
+               p_b = ndev->umem_2_buffer_param_b;
                *umemp = &mvq->umem2;
                break;
        case 3:
-               p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_a);
-               p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_b);
+               p_a = ndev->umem_3_buffer_param_a;
+               p_b = ndev->umem_3_buffer_param_b;
                *umemp = &mvq->umem3;
                break;
        }
+
        (*umemp)->size = p_a * mvq->num_ent + p_b;
 }
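
With the parameters cached at setup time, the required umem size stays the linear formula size = param_a * num_ent + param_b, now carried in u32 to match the 32-bit fields read from the device and to avoid sign trouble with large products. As an illustrative example (the numbers are hypothetical, not from the spec): with param_a = 128, param_b = 4096 and a 1024-entry virtqueue, the umem must be 128 * 1024 + 4096 = 135168 bytes.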
 
@@ -2679,6 +2719,11 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
                goto out;
        }
        mlx5_vdpa_add_debugfs(ndev);
+
+       err = read_umem_params(ndev);
+       if (err)
+               goto err_setup;
+
        err = setup_virtqueues(mvdev);
        if (err) {
                mlx5_vdpa_warn(mvdev, "setup_virtqueues\n");
@@ -2713,7 +2758,7 @@ err_tir:
 err_rqt:
        teardown_virtqueues(ndev);
 err_setup:
-       mlx5_vdpa_remove_debugfs(ndev->debugfs);
+       mlx5_vdpa_remove_debugfs(ndev);
 out:
        return err;
 }
@@ -2727,8 +2772,7 @@ static void teardown_driver(struct mlx5_vdpa_net *ndev)
        if (!ndev->setup)
                return;
 
-       mlx5_vdpa_remove_debugfs(ndev->debugfs);
-       ndev->debugfs = NULL;
+       mlx5_vdpa_remove_debugfs(ndev);
        teardown_steering(ndev);
        destroy_tir(ndev);
        destroy_rqt(ndev);
@@ -3489,8 +3533,6 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
        struct workqueue_struct *wq;
 
-       mlx5_vdpa_remove_debugfs(ndev->debugfs);
-       ndev->debugfs = NULL;
        unregister_link_notifier(ndev);
        _vdpa_unregister_device(dev);
        wq = mvdev->wq;
index 36c44d9..90b556a 100644 (file)
@@ -65,6 +65,15 @@ struct mlx5_vdpa_net {
        struct hlist_head macvlan_hash[MLX5V_MACVLAN_SIZE];
        struct mlx5_vdpa_irq_pool irqp;
        struct dentry *debugfs;
+
+       u32 umem_1_buffer_param_a;
+       u32 umem_1_buffer_param_b;
+
+       u32 umem_2_buffer_param_a;
+       u32 umem_2_buffer_param_b;
+
+       u32 umem_3_buffer_param_a;
+       u32 umem_3_buffer_param_b;
 };
 
 struct mlx5_vdpa_counter {
@@ -88,7 +97,7 @@ struct macvlan_node {
 };
 
 void mlx5_vdpa_add_debugfs(struct mlx5_vdpa_net *ndev);
-void mlx5_vdpa_remove_debugfs(struct dentry *dbg);
+void mlx5_vdpa_remove_debugfs(struct mlx5_vdpa_net *ndev);
 void mlx5_vdpa_add_rx_flow_table(struct mlx5_vdpa_net *ndev);
 void mlx5_vdpa_remove_rx_flow_table(struct mlx5_vdpa_net *ndev);
 void mlx5_vdpa_add_tirn(struct mlx5_vdpa_net *ndev);
index 00d7d72..b3a3cb1 100644 (file)
@@ -499,12 +499,13 @@ static int __init vdpasim_blk_init(void)
                                         GFP_KERNEL);
                if (!shared_buffer) {
                        ret = -ENOMEM;
-                       goto parent_err;
+                       goto mgmt_dev_err;
                }
        }
 
        return 0;
-
+mgmt_dev_err:
+       vdpa_mgmtdev_unregister(&mgmt_dev);
 parent_err:
        device_unregister(&vdpasim_blk_mgmtdev);
        return ret;
index c71d573..e0c181a 100644 (file)
@@ -1458,9 +1458,7 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
                goto done;
        }
 
-       if ((msg.type == VHOST_IOTLB_UPDATE ||
-            msg.type == VHOST_IOTLB_INVALIDATE) &&
-            msg.size == 0) {
+       if (msg.type == VHOST_IOTLB_UPDATE && msg.size == 0) {
                ret = -EINVAL;
                goto done;
        }
index 5b15936..2d5d252 100644 (file)
@@ -395,7 +395,11 @@ static inline s64 towards_target(struct virtio_balloon *vb)
        virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
                        &num_pages);
 
-       target = num_pages;
+       /*
+        * Align up to the guest page size to avoid endlessly inflating
+        * and deflating the balloon.
+        */
+       target = ALIGN(num_pages, VIRTIO_BALLOON_PAGES_PER_PAGE);
        return target - vb->num_pages;
 }
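
ALIGN() rounds up to the next multiple: ALIGN(x, a) expands to ((x) + (a) - 1) & ~((a) - 1) for power-of-two a. That is what stops the ping-pong: if the host asks for a number of 4 KiB balloon pages that is not a whole number of guest pages, the guest can only ever sit above or below the target and would inflate and deflate forever. A worked example, assuming a 64 KiB-page guest where VIRTIO_BALLOON_PAGES_PER_PAGE = 16: num_pages = 100 becomes ALIGN(100, 16) = 112, exactly 7 guest pages, so the target is reachable and the balloon settles.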
 
index 97760f6..59892a3 100644 (file)
@@ -631,14 +631,17 @@ static int virtio_mmio_probe(struct platform_device *pdev)
        spin_lock_init(&vm_dev->lock);
 
        vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(vm_dev->base))
-               return PTR_ERR(vm_dev->base);
+       if (IS_ERR(vm_dev->base)) {
+               rc = PTR_ERR(vm_dev->base);
+               goto free_vm_dev;
+       }
 
        /* Check magic value */
        magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
        if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
                dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
-               return -ENODEV;
+               rc = -ENODEV;
+               goto free_vm_dev;
        }
 
        /* Check device version */
@@ -646,7 +649,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
        if (vm_dev->version < 1 || vm_dev->version > 2) {
                dev_err(&pdev->dev, "Version %ld not supported!\n",
                                vm_dev->version);
-               return -ENXIO;
+               rc = -ENXIO;
+               goto free_vm_dev;
        }
 
        vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
@@ -655,7 +659,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
                 * virtio-mmio device with an ID 0 is a (dummy) placeholder
                 * with no function. End probing now with no error reported.
                 */
-               return -ENODEV;
+               rc = -ENODEV;
+               goto free_vm_dev;
        }
        vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
 
@@ -685,6 +690,10 @@ static int virtio_mmio_probe(struct platform_device *pdev)
                put_device(&vm_dev->vdev.dev);
 
        return rc;
+
+free_vm_dev:
+       kfree(vm_dev);
+       return rc;
 }
 
 static int virtio_mmio_remove(struct platform_device *pdev)
index aad7d92..9cb601e 100644 (file)
@@ -291,7 +291,7 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
        err = -EINVAL;
        mdev->common = vp_modern_map_capability(mdev, common,
                                      sizeof(struct virtio_pci_common_cfg), 4,
-                                     0, sizeof(struct virtio_pci_common_cfg),
+                                     0, sizeof(struct virtio_pci_modern_common_cfg),
                                      NULL, NULL);
        if (!mdev->common)
                goto err_map_common;
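
The one-line fix matters because the modern config layout embeds the legacy one as its first member and adds fields after it; mapping only sizeof(struct virtio_pci_common_cfg) leaves the new fields outside the mapping. A compile-time sanity sketch (assuming struct virtio_pci_modern_common_cfg as declared in include/linux/virtio_pci_modern.h):

#include <linux/build_bug.h>
#include <linux/virtio_pci_modern.h>

/* The outer struct is strictly larger: it carries queue_notify_data
 * and queue_reset after the embedded struct virtio_pci_common_cfg.
 */
static_assert(sizeof(struct virtio_pci_modern_common_cfg) >
	      sizeof(struct virtio_pci_common_cfg));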
index b7d54ef..a4a809e 100644 (file)
@@ -3196,12 +3196,14 @@ static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
  * We still need to do a tree search to find out the parents. This is for
  * TREE_BLOCK_REF backref (keyed or inlined).
  *
+ * @trans:     Transaction handle.
  * @ref_key:   The same as @ref_key in handle_direct_tree_backref()
  * @tree_key:  The first key of this tree block.
  * @path:      A clean (released) path, to avoid allocating path every time
  *             the function gets called.
  */
-static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
+static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans,
+                                       struct btrfs_backref_cache *cache,
                                        struct btrfs_path *path,
                                        struct btrfs_key *ref_key,
                                        struct btrfs_key *tree_key,
@@ -3315,7 +3317,7 @@ static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
                         * If we know the block isn't shared we can avoid
                         * checking its backrefs.
                         */
-                       if (btrfs_block_can_be_shared(root, eb))
+                       if (btrfs_block_can_be_shared(trans, root, eb))
                                upper->checked = 0;
                        else
                                upper->checked = 1;
@@ -3363,11 +3365,13 @@ out:
  *      links aren't yet bi-directional. Needs to finish such links.
  *      Use btrfs_backref_finish_upper_links() to finish such linkage.
  *
+ * @trans:     Transaction handle.
  * @path:      Released path for indirect tree backref lookup
  * @iter:      Released backref iter for extent tree search
  * @node_key:  The first key of the tree block
  */
-int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
+int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
+                               struct btrfs_backref_cache *cache,
                                struct btrfs_path *path,
                                struct btrfs_backref_iter *iter,
                                struct btrfs_key *node_key,
@@ -3467,8 +3471,8 @@ int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
                         * offset means the root objectid. We need to search
                         * the tree to get its parent bytenr.
                         */
-                       ret = handle_indirect_tree_backref(cache, path, &key, node_key,
-                                                          cur);
+                       ret = handle_indirect_tree_backref(trans, cache, path,
+                                                          &key, node_key, cur);
                        if (ret < 0)
                                goto out;
                }
index 1616e3e..71d535e 100644 (file)
@@ -540,7 +540,8 @@ static inline void btrfs_backref_panic(struct btrfs_fs_info *fs_info,
                    bytenr);
 }
 
-int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
+int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
+                               struct btrfs_backref_cache *cache,
                                struct btrfs_path *path,
                                struct btrfs_backref_iter *iter,
                                struct btrfs_key *node_key,
index da519c1..617d482 100644 (file)
@@ -367,7 +367,8 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
 /*
  * check if the tree block can be shared by multiple trees
  */
-int btrfs_block_can_be_shared(struct btrfs_root *root,
+int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
+                             struct btrfs_root *root,
                              struct extent_buffer *buf)
 {
        /*
@@ -376,11 +377,21 @@ int btrfs_block_can_be_shared(struct btrfs_root *root,
         * not allocated by tree relocation, we know the block is not shared.
         */
        if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
-           buf != root->node && buf != root->commit_root &&
+           buf != root->node &&
            (btrfs_header_generation(buf) <=
             btrfs_root_last_snapshot(&root->root_item) ||
-            btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
-               return 1;
+            btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) {
+               if (buf != root->commit_root)
+                       return 1;
+               /*
+                * An extent buffer that used to be the commit root may still be
+                * shared because the tree height may have increased and it
+                * became a child of a higher level root. This can happen when
+                * snapshotting a subvolume created in the current transaction.
+                */
+               if (btrfs_header_generation(buf) == trans->transid)
+                       return 1;
+       }
 
        return 0;
 }
@@ -415,7 +426,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
         * are only allowed for blocks use full backrefs.
         */
 
-       if (btrfs_block_can_be_shared(root, buf)) {
+       if (btrfs_block_can_be_shared(trans, root, buf)) {
                ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
                                               btrfs_header_level(buf), 1,
                                               &refs, &flags);
index 9419f4e..ff40acd 100644 (file)
@@ -540,7 +540,8 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root,
                      struct extent_buffer *buf,
                      struct extent_buffer **cow_ret, u64 new_root_objectid);
-int btrfs_block_can_be_shared(struct btrfs_root *root,
+int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
+                             struct btrfs_root *root,
                              struct extent_buffer *buf);
 int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                  struct btrfs_path *path, int level, int slot);
index 9951a0c..c6d4bb8 100644 (file)
@@ -466,6 +466,7 @@ static bool handle_useless_nodes(struct reloc_control *rc,
  * cached.
  */
 static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
+                       struct btrfs_trans_handle *trans,
                        struct reloc_control *rc, struct btrfs_key *node_key,
                        int level, u64 bytenr)
 {
@@ -499,8 +500,8 @@ static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
 
        /* Breadth-first search to build backref cache */
        do {
-               ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
-                                                 cur);
+               ret = btrfs_backref_add_tree_node(trans, cache, path, iter,
+                                                 node_key, cur);
                if (ret < 0) {
                        err = ret;
                        goto out;
@@ -2803,7 +2804,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
 
        /* Do tree relocation */
        rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
-               node = build_backref_tree(rc, &block->key,
+               node = build_backref_tree(trans, rc, &block->key,
                                          block->level, block->bytenr);
                if (IS_ERR(node)) {
                        err = PTR_ERR(node);
index 48260cf..02f5fca 100644 (file)
@@ -1788,6 +1788,12 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
        if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
                goto out;
 
+       err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
+       if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
+               goto out;
+       if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
+               goto out;
+
 retry:
        host_err = fh_want_write(ffhp);
        if (host_err) {
@@ -1823,12 +1829,6 @@ retry:
        if (ndentry == trap)
                goto out_dput_new;
 
-       host_err = -EXDEV;
-       if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
-               goto out_dput_new;
-       if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
-               goto out_dput_new;
-
        if ((ndentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK) &&
            nfsd_has_cached_files(ndentry)) {
                close_cached = true;
index a30686e..47d25a5 100644 (file)
@@ -60,6 +60,7 @@ struct resv_map {
        long adds_in_progress;
        struct list_head region_cache;
        long region_cache_count;
+       struct rw_semaphore rw_sema;
 #ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On private mappings, the counter to uncharge reservations is stored
@@ -138,7 +139,7 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
 void unmap_hugepage_range(struct vm_area_struct *,
                          unsigned long, unsigned long, struct page *,
                          zap_flags_t);
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+void __unmap_hugepage_range(struct mmu_gather *tlb,
                          struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page, zap_flags_t zap_flags);
@@ -245,6 +246,25 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end);
 
+extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
+                               unsigned long *begin, unsigned long *end);
+extern void __hugetlb_zap_end(struct vm_area_struct *vma,
+                             struct zap_details *details);
+
+static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
+                                    unsigned long *start, unsigned long *end)
+{
+       if (is_vm_hugetlb_page(vma))
+               __hugetlb_zap_begin(vma, start, end);
+}
+
+static inline void hugetlb_zap_end(struct vm_area_struct *vma,
+                                  struct zap_details *details)
+{
+       if (is_vm_hugetlb_page(vma))
+               __hugetlb_zap_end(vma, details);
+}
+
 void hugetlb_vma_lock_read(struct vm_area_struct *vma);
 void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
 void hugetlb_vma_lock_write(struct vm_area_struct *vma);
@@ -296,6 +316,18 @@ static inline void adjust_range_if_pmd_sharing_possible(
 {
 }
 
+static inline void hugetlb_zap_begin(
+                               struct vm_area_struct *vma,
+                               unsigned long *start, unsigned long *end)
+{
+}
+
+static inline void hugetlb_zap_end(
+                               struct vm_area_struct *vma,
+                               struct zap_details *details)
+{
+}
+
 static inline struct page *hugetlb_follow_page_mask(
     struct vm_area_struct *vma, unsigned long address, unsigned int flags,
     unsigned int *page_mask)
@@ -441,7 +473,7 @@ static inline long hugetlb_change_protection(
        return 0;
 }
 
-static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page,
                        zap_flags_t zap_flags)
@@ -1233,6 +1265,11 @@ static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
        return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
 }
 
+static inline bool __vma_private_lock(struct vm_area_struct *vma)
+{
+       return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
+}
+
 /*
  * Safe version of huge_pte_offset() to check the locks.  See comments
  * above huge_pte_offset().
index bd2f6e1..b24fb80 100644 (file)
@@ -4356,6 +4356,35 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
 }
 
 /**
+ * ieee80211_is_protected_dual_of_public_action - check if skb contains a
+ * protected dual of public action management frame
+ * @skb: the skb containing the frame, length will be checked
+ *
+ * Return: true if the skb contains a protected dual of public action
+ * management frame, false otherwise.
+ */
+static inline bool
+ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb)
+{
+       u8 action;
+
+       if (!ieee80211_is_public_action((void *)skb->data, skb->len) ||
+           skb->len < IEEE80211_MIN_ACTION_SIZE + 1)
+               return false;
+
+       action = *(u8 *)(skb->data + IEEE80211_MIN_ACTION_SIZE);
+
+       return action != WLAN_PUB_ACTION_20_40_BSS_COEX &&
+               action != WLAN_PUB_ACTION_DSE_REG_LOC_ANN &&
+               action != WLAN_PUB_ACTION_MSMT_PILOT &&
+               action != WLAN_PUB_ACTION_TDLS_DISCOVER_RES &&
+               action != WLAN_PUB_ACTION_LOC_TRACK_NOTI &&
+               action != WLAN_PUB_ACTION_FTM_REQUEST &&
+               action != WLAN_PUB_ACTION_FTM_RESPONSE &&
+               action != WLAN_PUB_ACTION_FILS_DISCOVERY;
+}
+
+/**
  * _ieee80211_is_group_privacy_action - check if frame is a group addressed
  * privacy action frame
  * @hdr: the frame
index 842623d..71fa9a4 100644 (file)
@@ -466,10 +466,10 @@ static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}
 
 #endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
 
-#ifdef CONFIG_KASAN_INLINE
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 void kasan_non_canonical_hook(unsigned long addr);
-#else /* CONFIG_KASAN_INLINE */
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
 static inline void kasan_non_canonical_hook(unsigned long addr) { }
-#endif /* CONFIG_KASAN_INLINE */
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
 
 #endif /* LINUX_KASAN_H */
index d466e1a..fe1507c 100644 (file)
@@ -53,6 +53,7 @@ struct nf_flowtable_type {
        struct list_head                list;
        int                             family;
        int                             (*init)(struct nf_flowtable *ft);
+       bool                            (*gc)(const struct flow_offload *flow);
        int                             (*setup)(struct nf_flowtable *ft,
                                                 struct net_device *dev,
                                                 enum flow_block_command cmd);
index 2f61298..3dcdb9e 100644 (file)
@@ -33,6 +33,6 @@ enum gtp_attrs {
        GTPA_PAD,
        __GTPA_MAX,
 };
-#define GTPA_MAX (__GTPA_MAX + 1)
+#define GTPA_MAX (__GTPA_MAX - 1)
 
 #endif /* _UAPI_LINUX_GTP_H_ */
index 0e00a84..bb24d84 100644 (file)
@@ -5627,7 +5627,7 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
        /* Internal nodes */
        nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
        /* Add working room for split (2 nodes) + new parents */
-       mas_node_count(mas, nr_nodes + 3);
+       mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
 
        /* Detect if allocations run out */
        mas->mas_flags |= MA_STATE_PREALLOC;
index 0695916..464eeb9 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/maple_tree.h>
 #include <linux/module.h>
+#include <linux/rwsem.h>
 
 #define MTREE_ALLOC_MAX 0x2000000000000Ul
 #define CONFIG_MAPLE_SEARCH
@@ -1841,17 +1842,21 @@ static noinline void __init check_forking(struct maple_tree *mt)
        void *val;
        MA_STATE(mas, mt, 0, 0);
        MA_STATE(newmas, mt, 0, 0);
+       struct rw_semaphore newmt_lock;
+
+       init_rwsem(&newmt_lock);
 
        for (i = 0; i <= nr_entries; i++)
                mtree_store_range(mt, i*10, i*10 + 5,
                                  xa_mk_value(i), GFP_KERNEL);
 
        mt_set_non_kernel(99999);
-       mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+       mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+       mt_set_external_lock(&newmt, &newmt_lock);
        newmas.tree = &newmt;
        mas_reset(&newmas);
        mas_reset(&mas);
-       mas_lock(&newmas);
+       down_write(&newmt_lock);
        mas.index = 0;
        mas.last = 0;
        if (mas_expected_entries(&newmas, nr_entries)) {
@@ -1866,10 +1871,10 @@ static noinline void __init check_forking(struct maple_tree *mt)
        }
        rcu_read_unlock();
        mas_destroy(&newmas);
-       mas_unlock(&newmas);
        mt_validate(&newmt);
        mt_set_non_kernel(0);
-       mtree_destroy(&newmt);
+       __mt_destroy(&newmt);
+       up_write(&newmt_lock);
 }
 
 static noinline void __init check_iteration(struct maple_tree *mt)
@@ -1980,6 +1985,10 @@ static noinline void __init bench_forking(struct maple_tree *mt)
        void *val;
        MA_STATE(mas, mt, 0, 0);
        MA_STATE(newmas, mt, 0, 0);
+       struct rw_semaphore newmt_lock;
+
+       init_rwsem(&newmt_lock);
+       mt_set_external_lock(&newmt, &newmt_lock);
 
        for (i = 0; i <= nr_entries; i++)
                mtree_store_range(mt, i*10, i*10 + 5,
@@ -1994,7 +2003,7 @@ static noinline void __init bench_forking(struct maple_tree *mt)
                mas.index = 0;
                mas.last = 0;
                rcu_read_lock();
-               mas_lock(&newmas);
+               down_write(&newmt_lock);
                if (mas_expected_entries(&newmas, nr_entries)) {
                        printk("OOM!");
                        BUG_ON(1);
@@ -2005,11 +2014,11 @@ static noinline void __init bench_forking(struct maple_tree *mt)
                        mas_store(&newmas, val);
                }
                mas_destroy(&newmas);
-               mas_unlock(&newmas);
                rcu_read_unlock();
                mt_validate(&newmt);
                mt_set_non_kernel(0);
-               mtree_destroy(&newmt);
+               __mt_destroy(&newmt);
+               up_write(&newmt_lock);
        }
 }
 #endif
@@ -2616,6 +2625,10 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
        void *tmp;
        MA_STATE(mas, mt, 0, 0);
        MA_STATE(newmas, &newmt, 0, 0);
+       struct rw_semaphore newmt_lock;
+
+       init_rwsem(&newmt_lock);
+       mt_set_external_lock(&newmt, &newmt_lock);
 
        if (!zero_start)
                i = 1;
@@ -2625,9 +2638,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
                mtree_store_range(mt, i*10, (i+1)*10 - gap,
                                  xa_mk_value(i), GFP_KERNEL);
 
-       mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+       mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
        mt_set_non_kernel(99999);
-       mas_lock(&newmas);
+       down_write(&newmt_lock);
        ret = mas_expected_entries(&newmas, nr_entries);
        mt_set_non_kernel(0);
        MT_BUG_ON(mt, ret != 0);
@@ -2640,9 +2653,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
        }
        rcu_read_unlock();
        mas_destroy(&newmas);
-       mas_unlock(&newmas);
 
-       mtree_destroy(&newmt);
+       __mt_destroy(&newmt);
+       up_write(&newmt_lock);
 }
 
 /* Duplicate many sizes of trees.  Mainly to test expected entry values */
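
The conversions above all follow one shape: the test tree is created with MT_FLAGS_LOCK_EXTERN, an rwsem is registered as its external lock, writers take down_write() where mas_lock() used to be, and teardown switches from mtree_destroy() to __mt_destroy() because the caller already holds the lock. A rough, self-contained sketch of that pattern (illustrative only, not part of the patch; error handling omitted):

#include <linux/bug.h>
#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/xarray.h>	/* xa_mk_value() */

static struct rw_semaphore demo_lock;
static struct maple_tree demo_mt;

static void demo_external_lock(void)
{
	MA_STATE(mas, &demo_mt, 10, 15);
	void *entry;

	init_rwsem(&demo_lock);
	/* MT_FLAGS_LOCK_EXTERN: callers lock, the tree only asserts it */
	mt_init_flags(&demo_mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
	mt_set_external_lock(&demo_mt, &demo_lock);

	down_write(&demo_lock);
	mas_store_gfp(&mas, xa_mk_value(1), GFP_KERNEL);	/* [10, 15] */
	up_write(&demo_lock);

	down_read(&demo_lock);
	entry = mtree_load(&demo_mt, 12);
	WARN_ON(entry != xa_mk_value(1));
	up_read(&demo_lock);

	/* mtree_destroy() would take the internal lock; use __mt_destroy() */
	down_write(&demo_lock);
	__mt_destroy(&demo_mt);
	up_write(&demo_lock);
}
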
index b86ba7b..f60e561 100644 (file)
@@ -1208,6 +1208,8 @@ static int damon_sysfs_set_targets(struct damon_ctx *ctx,
        return 0;
 }
 
+static bool damon_sysfs_schemes_regions_updating;
+
 static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
 {
        struct damon_target *t, *next;
@@ -1219,8 +1221,10 @@ static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
        cmd = damon_sysfs_cmd_request.cmd;
        if (kdamond && ctx == kdamond->damon_ctx &&
                        (cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS ||
-                        cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES)) {
+                        cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES) &&
+                       damon_sysfs_schemes_regions_updating) {
                damon_sysfs_schemes_update_regions_stop(ctx);
+               damon_sysfs_schemes_regions_updating = false;
                mutex_unlock(&damon_sysfs_lock);
        }
 
@@ -1340,7 +1344,6 @@ static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
 static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
 {
        struct damon_sysfs_kdamond *kdamond;
-       static bool damon_sysfs_schemes_regions_updating;
        bool total_bytes_only = false;
        int err = 0;
 
index 52d2607..1301ba7 100644 (file)
@@ -97,6 +97,7 @@ static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
 static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
                unsigned long start, unsigned long end);
+static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
 
 static inline bool subpool_is_free(struct hugepage_subpool *spool)
 {
@@ -267,6 +268,10 @@ void hugetlb_vma_lock_read(struct vm_area_struct *vma)
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
                down_read(&vma_lock->rw_sema);
+       } else if (__vma_private_lock(vma)) {
+               struct resv_map *resv_map = vma_resv_map(vma);
+
+               down_read(&resv_map->rw_sema);
        }
 }
 
@@ -276,6 +281,10 @@ void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
                up_read(&vma_lock->rw_sema);
+       } else if (__vma_private_lock(vma)) {
+               struct resv_map *resv_map = vma_resv_map(vma);
+
+               up_read(&resv_map->rw_sema);
        }
 }
 
@@ -285,6 +294,10 @@ void hugetlb_vma_lock_write(struct vm_area_struct *vma)
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
                down_write(&vma_lock->rw_sema);
+       } else if (__vma_private_lock(vma)) {
+               struct resv_map *resv_map = vma_resv_map(vma);
+
+               down_write(&resv_map->rw_sema);
        }
 }
 
@@ -294,17 +307,27 @@ void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
                up_write(&vma_lock->rw_sema);
+       } else if (__vma_private_lock(vma)) {
+               struct resv_map *resv_map = vma_resv_map(vma);
+
+               up_write(&resv_map->rw_sema);
        }
 }
 
 int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
 {
-       struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
-       if (!__vma_shareable_lock(vma))
-               return 1;
+       if (__vma_shareable_lock(vma)) {
+               struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
-       return down_write_trylock(&vma_lock->rw_sema);
+               return down_write_trylock(&vma_lock->rw_sema);
+       } else if (__vma_private_lock(vma)) {
+               struct resv_map *resv_map = vma_resv_map(vma);
+
+               return down_write_trylock(&resv_map->rw_sema);
+       }
+
+       return 1;
 }
 
 void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
@@ -313,6 +336,10 @@ void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
                lockdep_assert_held(&vma_lock->rw_sema);
+       } else if (__vma_private_lock(vma)) {
+               struct resv_map *resv_map = vma_resv_map(vma);
+
+               lockdep_assert_held(&resv_map->rw_sema);
        }
 }
 
@@ -345,6 +372,11 @@ static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
                struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
                __hugetlb_vma_unlock_write_put(vma_lock);
+       } else if (__vma_private_lock(vma)) {
+               struct resv_map *resv_map = vma_resv_map(vma);
+
+               /* no free for anon vmas, but still need to unlock */
+               up_write(&resv_map->rw_sema);
        }
 }
 
@@ -1068,6 +1100,7 @@ struct resv_map *resv_map_alloc(void)
        kref_init(&resv_map->refs);
        spin_lock_init(&resv_map->lock);
        INIT_LIST_HEAD(&resv_map->regions);
+       init_rwsem(&resv_map->rw_sema);
 
        resv_map->adds_in_progress = 0;
        /*
@@ -1138,8 +1171,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
 
-       set_vma_private_data(vma, (get_vma_private_data(vma) &
-                               HPAGE_RESV_MASK) | (unsigned long)map);
+       set_vma_private_data(vma, (unsigned long)map);
 }
 
 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
@@ -5274,9 +5306,9 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
        return len + old_addr - old_end;
 }
 
-static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
-                                  unsigned long start, unsigned long end,
-                                  struct page *ref_page, zap_flags_t zap_flags)
+void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+                           unsigned long start, unsigned long end,
+                           struct page *ref_page, zap_flags_t zap_flags)
 {
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
@@ -5405,16 +5437,25 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
                tlb_flush_mmu_tlbonly(tlb);
 }
 
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
-                         struct vm_area_struct *vma, unsigned long start,
-                         unsigned long end, struct page *ref_page,
-                         zap_flags_t zap_flags)
+void __hugetlb_zap_begin(struct vm_area_struct *vma,
+                        unsigned long *start, unsigned long *end)
 {
+       if (!vma->vm_file)      /* hugetlbfs_file_mmap error */
+               return;
+
+       adjust_range_if_pmd_sharing_possible(vma, start, end);
        hugetlb_vma_lock_write(vma);
-       i_mmap_lock_write(vma->vm_file->f_mapping);
+       if (vma->vm_file)
+               i_mmap_lock_write(vma->vm_file->f_mapping);
+}
 
-       /* mmu notification performed in caller */
-       __unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
+void __hugetlb_zap_end(struct vm_area_struct *vma,
+                      struct zap_details *details)
+{
+       zap_flags_t zap_flags = details ? details->zap_flags : 0;
+
+       if (!vma->vm_file)      /* hugetlbfs_file_mmap error */
+               return;
 
        if (zap_flags & ZAP_FLAG_UNMAP) {       /* final unmap */
                /*
@@ -5427,11 +5468,12 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                 * someone else.
                 */
                __hugetlb_vma_unlock_write_free(vma);
-               i_mmap_unlock_write(vma->vm_file->f_mapping);
        } else {
-               i_mmap_unlock_write(vma->vm_file->f_mapping);
                hugetlb_vma_unlock_write(vma);
        }
+
+       if (vma->vm_file)
+               i_mmap_unlock_write(vma->vm_file->f_mapping);
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
@@ -6811,8 +6853,10 @@ out_err:
                 */
                if (chg >= 0 && add < 0)
                        region_abort(resv_map, from, to, regions_needed);
-       if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+       if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
                kref_put(&resv_map->refs, resv_map_release);
+               set_vma_resv_map(vma, NULL);
+       }
        return false;
 }
 
index ca4b6ff..6e3cb11 100644 (file)
@@ -621,7 +621,7 @@ void kasan_report_async(void)
 }
 #endif /* CONFIG_KASAN_HW_TAGS */
 
-#ifdef CONFIG_KASAN_INLINE
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 /*
  * With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
  * canonical half of the address space) cause out-of-bounds shadow memory reads
index 6c264d2..517221f 100644 (file)
@@ -1683,7 +1683,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
                        if (vma->vm_file) {
                                zap_flags_t zap_flags = details ?
                                    details->zap_flags : 0;
-                               __unmap_hugepage_range_final(tlb, vma, start, end,
+                               __unmap_hugepage_range(tlb, vma, start, end,
                                                             NULL, zap_flags);
                        }
                } else
@@ -1728,8 +1728,12 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
                                start_addr, end_addr);
        mmu_notifier_invalidate_range_start(&range);
        do {
-               unmap_single_vma(tlb, vma, start_addr, end_addr, &details,
+               unsigned long start = start_addr;
+               unsigned long end = end_addr;
+               hugetlb_zap_begin(vma, &start, &end);
+               unmap_single_vma(tlb, vma, start, end, &details,
                                 mm_wr_locked);
+               hugetlb_zap_end(vma, &details);
        } while ((vma = mas_find(mas, tree_end - 1)) != NULL);
        mmu_notifier_invalidate_range_end(&range);
 }
@@ -1753,9 +1757,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
        lru_add_drain();
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
                                address, end);
-       if (is_vm_hugetlb_page(vma))
-               adjust_range_if_pmd_sharing_possible(vma, &range.start,
-                                                    &range.end);
+       hugetlb_zap_begin(vma, &range.start, &range.end);
        tlb_gather_mmu(&tlb, vma->vm_mm);
        update_hiwater_rss(vma->vm_mm);
        mmu_notifier_invalidate_range_start(&range);
@@ -1766,6 +1768,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
        unmap_single_vma(&tlb, vma, address, end, details, false);
        mmu_notifier_invalidate_range_end(&range);
        tlb_finish_mmu(&tlb);
+       hugetlb_zap_end(vma, details);
 }
 
 /**
index f1b00d6..29ebf1e 100644 (file)
@@ -1543,8 +1543,10 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
                 * the home node for vmas we already updated before.
                 */
                old = vma_policy(vma);
-               if (!old)
+               if (!old) {
+                       prev = vma;
                        continue;
+               }
                if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
                        err = -EOPNOTSUPP;
                        break;
index 2053b54..06086dc 100644 (file)
@@ -2162,6 +2162,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
                         const int __user *nodes,
                         int __user *status, int flags)
 {
+       compat_uptr_t __user *compat_pages = (void __user *)pages;
        int current_node = NUMA_NO_NODE;
        LIST_HEAD(pagelist);
        int start, i;
@@ -2174,8 +2175,17 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
                int node;
 
                err = -EFAULT;
-               if (get_user(p, pages + i))
-                       goto out_flush;
+               if (in_compat_syscall()) {
+                       compat_uptr_t cp;
+
+                       if (get_user(cp, compat_pages + i))
+                               goto out_flush;
+
+                       p = compat_ptr(cp);
+               } else {
+                       if (get_user(p, pages + i))
+                               goto out_flush;
+               }
                if (get_user(node, nodes + i))
                        goto out_flush;
 
index b56a7f0..9e018d8 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -583,11 +583,12 @@ again:
  * dup_anon_vma() - Helper function to duplicate anon_vma
  * @dst: The destination VMA
  * @src: The source VMA
+ * @dup: Pointer to the destination VMA when successful.
  *
  * Returns: 0 on success.
  */
 static inline int dup_anon_vma(struct vm_area_struct *dst,
-                              struct vm_area_struct *src)
+               struct vm_area_struct *src, struct vm_area_struct **dup)
 {
        /*
         * Easily overlooked: when mprotect shifts the boundary, make sure the
@@ -595,9 +596,15 @@ static inline int dup_anon_vma(struct vm_area_struct *dst,
         * anon pages imported.
         */
        if (src->anon_vma && !dst->anon_vma) {
+               int ret;
+
                vma_assert_write_locked(dst);
                dst->anon_vma = src->anon_vma;
-               return anon_vma_clone(dst, src);
+               ret = anon_vma_clone(dst, src);
+               if (ret)
+                       return ret;
+
+               *dup = dst;
        }
 
        return 0;
@@ -624,6 +631,7 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
               unsigned long start, unsigned long end, pgoff_t pgoff,
               struct vm_area_struct *next)
 {
+       struct vm_area_struct *anon_dup = NULL;
        bool remove_next = false;
        struct vma_prepare vp;
 
@@ -633,7 +641,7 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
 
                remove_next = true;
                vma_start_write(next);
-               ret = dup_anon_vma(vma, next);
+               ret = dup_anon_vma(vma, next, &anon_dup);
                if (ret)
                        return ret;
        }
@@ -661,6 +669,8 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
        return 0;
 
 nomem:
+       if (anon_dup)
+               unlink_anon_vmas(anon_dup);
        return -ENOMEM;
 }
 
@@ -860,6 +870,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
 {
        struct vm_area_struct *curr, *next, *res;
        struct vm_area_struct *vma, *adjust, *remove, *remove2;
+       struct vm_area_struct *anon_dup = NULL;
        struct vma_prepare vp;
        pgoff_t vma_pgoff;
        int err = 0;
@@ -927,18 +938,18 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
                vma_start_write(next);
                remove = next;                          /* case 1 */
                vma_end = next->vm_end;
-               err = dup_anon_vma(prev, next);
+               err = dup_anon_vma(prev, next, &anon_dup);
                if (curr) {                             /* case 6 */
                        vma_start_write(curr);
                        remove = curr;
                        remove2 = next;
                        if (!next->anon_vma)
-                               err = dup_anon_vma(prev, curr);
+                               err = dup_anon_vma(prev, curr, &anon_dup);
                }
        } else if (merge_prev) {                        /* case 2 */
                if (curr) {
                        vma_start_write(curr);
-                       err = dup_anon_vma(prev, curr);
+                       err = dup_anon_vma(prev, curr, &anon_dup);
                        if (end == curr->vm_end) {      /* case 7 */
                                remove = curr;
                        } else {                        /* case 5 */
@@ -954,7 +965,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
                        vma_end = addr;
                        adjust = next;
                        adj_start = -(prev->vm_end - addr);
-                       err = dup_anon_vma(next, prev);
+                       err = dup_anon_vma(next, prev, &anon_dup);
                } else {
                        /*
                         * Note that cases 3 and 8 are the ONLY ones where prev
@@ -968,14 +979,14 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
                                vma_pgoff = curr->vm_pgoff;
                                vma_start_write(curr);
                                remove = curr;
-                               err = dup_anon_vma(next, curr);
+                               err = dup_anon_vma(next, curr, &anon_dup);
                        }
                }
        }
 
        /* Error in anon_vma clone. */
        if (err)
-               return NULL;
+               goto anon_vma_fail;
 
        if (vma_start < vma->vm_start || vma_end > vma->vm_end)
                vma_expanded = true;
@@ -988,7 +999,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
        }
 
        if (vma_iter_prealloc(vmi, vma))
-               return NULL;
+               goto prealloc_fail;
 
        init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
        VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
@@ -1016,6 +1027,15 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
        vma_complete(&vp, vmi, mm);
        khugepaged_enter_vma(res, vm_flags);
        return res;
+
+prealloc_fail:
+       if (anon_dup)
+               unlink_anon_vmas(anon_dup);
+
+anon_vma_fail:
+       vma_iter_set(vmi, addr);
+       vma_iter_load(vmi);
+       return NULL;
 }
 
 /*
@@ -3143,13 +3163,13 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
        if (!len)
                return 0;
 
-       if (mmap_write_lock_killable(mm))
-               return -EINTR;
-
        /* Until we need other flags, refuse anything except VM_EXEC. */
        if ((flags & (~VM_EXEC)) != 0)
                return -EINVAL;
 
+       if (mmap_write_lock_killable(mm))
+               return -EINTR;
+
        ret = check_brk_limits(addr, len);
        if (ret)
                goto limits_failed;
index 95546f3..8574140 100644 (file)
@@ -6475,6 +6475,7 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
                        next_page = page;
                        current_buddy = page + size;
                }
+               page = next_page;
 
                if (set_page_guard(zone, current_buddy, high, migratetype))
                        continue;
@@ -6482,7 +6483,6 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
                if (current_buddy != target) {
                        add_to_free_list(current_buddy, zone, high, migratetype);
                        set_buddy_order(current_buddy, high);
-                       page = next_page;
                }
        }
 }
index 083c693..37d2b1c 100644 (file)
@@ -1383,8 +1383,8 @@ reject:
 
 shrink:
        pool = zswap_pool_last_get();
-       if (pool)
-               queue_work(shrink_wq, &pool->shrink_work);
+       if (pool && !queue_work(shrink_wq, &pool->shrink_work))
+               zswap_pool_put(pool);
        goto reject;
 }
 
index 9c09f09..df81c1f 100644 (file)
@@ -251,7 +251,8 @@ bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
 
 static int neigh_forced_gc(struct neigh_table *tbl)
 {
-       int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
+       int max_clean = atomic_read(&tbl->gc_entries) -
+                       READ_ONCE(tbl->gc_thresh2);
        unsigned long tref = jiffies - 5 * HZ;
        struct neighbour *n, *tmp;
        int shrunk = 0;
@@ -280,7 +281,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
                }
        }
 
-       tbl->last_flush = jiffies;
+       WRITE_ONCE(tbl->last_flush, jiffies);
 
        write_unlock_bh(&tbl->lock);
 
@@ -464,17 +465,17 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl,
 {
        struct neighbour *n = NULL;
        unsigned long now = jiffies;
-       int entries;
+       int entries, gc_thresh3;
 
        if (exempt_from_gc)
                goto do_alloc;
 
        entries = atomic_inc_return(&tbl->gc_entries) - 1;
-       if (entries >= tbl->gc_thresh3 ||
-           (entries >= tbl->gc_thresh2 &&
-            time_after(now, tbl->last_flush + 5 * HZ))) {
-               if (!neigh_forced_gc(tbl) &&
-                   entries >= tbl->gc_thresh3) {
+       gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
+       if (entries >= gc_thresh3 ||
+           (entries >= READ_ONCE(tbl->gc_thresh2) &&
+            time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
+               if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
                        net_info_ratelimited("%s: neighbor table overflow!\n",
                                             tbl->id);
                        NEIGH_CACHE_STAT_INC(tbl, table_fulls);
@@ -955,13 +956,14 @@ static void neigh_periodic_work(struct work_struct *work)
 
        if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
                struct neigh_parms *p;
-               tbl->last_rand = jiffies;
+
+               WRITE_ONCE(tbl->last_rand, jiffies);
                list_for_each_entry(p, &tbl->parms_list, list)
                        p->reachable_time =
                                neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
        }
 
-       if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
+       if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
                goto out;
 
        for (i = 0 ; i < (1 << nht->hash_shift); i++) {
@@ -2167,15 +2169,16 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
        ndtmsg->ndtm_pad2   = 0;
 
        if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
-           nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
-           nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
-           nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
-           nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
+           nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
+                         NDTA_PAD) ||
+           nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
+           nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
+           nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
                goto nla_put_failure;
        {
                unsigned long now = jiffies;
-               long flush_delta = now - tbl->last_flush;
-               long rand_delta = now - tbl->last_rand;
+               long flush_delta = now - READ_ONCE(tbl->last_flush);
+               long rand_delta = now - READ_ONCE(tbl->last_rand);
                struct neigh_hash_table *nht;
                struct ndt_config ndc = {
                        .ndtc_key_len           = tbl->key_len,
@@ -2183,7 +2186,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
                        .ndtc_entries           = atomic_read(&tbl->entries),
                        .ndtc_last_flush        = jiffies_to_msecs(flush_delta),
                        .ndtc_last_rand         = jiffies_to_msecs(rand_delta),
-                       .ndtc_proxy_qlen        = tbl->proxy_queue.qlen,
+                       .ndtc_proxy_qlen        = READ_ONCE(tbl->proxy_queue.qlen),
                };
 
                rcu_read_lock();
@@ -2206,17 +2209,17 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
                        struct neigh_statistics *st;
 
                        st = per_cpu_ptr(tbl->stats, cpu);
-                       ndst.ndts_allocs                += st->allocs;
-                       ndst.ndts_destroys              += st->destroys;
-                       ndst.ndts_hash_grows            += st->hash_grows;
-                       ndst.ndts_res_failed            += st->res_failed;
-                       ndst.ndts_lookups               += st->lookups;
-                       ndst.ndts_hits                  += st->hits;
-                       ndst.ndts_rcv_probes_mcast      += st->rcv_probes_mcast;
-                       ndst.ndts_rcv_probes_ucast      += st->rcv_probes_ucast;
-                       ndst.ndts_periodic_gc_runs      += st->periodic_gc_runs;
-                       ndst.ndts_forced_gc_runs        += st->forced_gc_runs;
-                       ndst.ndts_table_fulls           += st->table_fulls;
+                       ndst.ndts_allocs                += READ_ONCE(st->allocs);
+                       ndst.ndts_destroys              += READ_ONCE(st->destroys);
+                       ndst.ndts_hash_grows            += READ_ONCE(st->hash_grows);
+                       ndst.ndts_res_failed            += READ_ONCE(st->res_failed);
+                       ndst.ndts_lookups               += READ_ONCE(st->lookups);
+                       ndst.ndts_hits                  += READ_ONCE(st->hits);
+                       ndst.ndts_rcv_probes_mcast      += READ_ONCE(st->rcv_probes_mcast);
+                       ndst.ndts_rcv_probes_ucast      += READ_ONCE(st->rcv_probes_ucast);
+                       ndst.ndts_periodic_gc_runs      += READ_ONCE(st->periodic_gc_runs);
+                       ndst.ndts_forced_gc_runs        += READ_ONCE(st->forced_gc_runs);
+                       ndst.ndts_table_fulls           += READ_ONCE(st->table_fulls);
                }
 
                if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
@@ -2445,16 +2448,16 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
                goto errout_tbl_lock;
 
        if (tb[NDTA_THRESH1])
-               tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
+               WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));
 
        if (tb[NDTA_THRESH2])
-               tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
+               WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));
 
        if (tb[NDTA_THRESH3])
-               tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
+               WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));
 
        if (tb[NDTA_GC_INTERVAL])
-               tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
+               WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));
 
        err = 0;
 
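
The annotations above all apply one rule: a field written under tbl->lock but read locklessly gets WRITE_ONCE() on the writer side and READ_ONCE() on every lockless reader, which stops the compiler from tearing, fusing, or re-reading the access and marks the data race as intentional (e.g. for KCSAN). A generic sketch of the pairing (demo_* names are invented):

#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>

struct demo_table {
	spinlock_t lock;		/* assumed spin_lock_init()ed */
	unsigned long last_flush;	/* written under lock, read without */
};

static void demo_flush(struct demo_table *t)
{
	spin_lock_bh(&t->lock);
	/* ... evict entries ... */
	WRITE_ONCE(t->last_flush, jiffies);	/* pairs with READ_ONCE() below */
	spin_unlock_bh(&t->lock);
}

static bool demo_flushed_recently(const struct demo_table *t)
{
	/* lockless reader: no tearing or refetch of last_flush */
	return time_before(jiffies, READ_ONCE(t->last_flush) + 5 * HZ);
}
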
index d0bc1dd..80c7302 100644 (file)
@@ -87,29 +87,6 @@ struct nlmsghdr *handshake_genl_put(struct sk_buff *msg,
 }
 EXPORT_SYMBOL(handshake_genl_put);
 
-/*
- * dup() a kernel socket for use as a user space file descriptor
- * in the current process. The kernel socket must have an
- * instatiated struct file.
- *
- * Implicit argument: "current()"
- */
-static int handshake_dup(struct socket *sock)
-{
-       struct file *file;
-       int newfd;
-
-       file = get_file(sock->file);
-       newfd = get_unused_fd_flags(O_CLOEXEC);
-       if (newfd < 0) {
-               fput(file);
-               return newfd;
-       }
-
-       fd_install(newfd, file);
-       return newfd;
-}
-
 int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info)
 {
        struct net *net = sock_net(skb->sk);
@@ -133,17 +110,20 @@ int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info)
                goto out_status;
 
        sock = req->hr_sk->sk_socket;
-       fd = handshake_dup(sock);
+       fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0) {
                err = fd;
                goto out_complete;
        }
+
        err = req->hr_proto->hp_accept(req, info, fd);
        if (err) {
-               fput(sock->file);
+               put_unused_fd(fd);
                goto out_complete;
        }
 
+       fd_install(fd, get_file(sock->file));
+
        trace_handshake_cmd_accept(net, req, req->hr_sk, fd);
        return 0;
 
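
The rewrite above follows the usual lifetime rule for publishing files: fd_install() makes the descriptor visible to userspace and cannot be undone, so the fd is only reserved up front and installed after the last fallible step; until then put_unused_fd() releases the reservation cleanly. A minimal sketch of that ordering (demo function, not the patch's code):

#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>

static int demo_publish_file(struct file *file, int (*setup)(int fd))
{
	int fd, err;

	fd = get_unused_fd_flags(O_CLOEXEC);	/* reserved, not yet visible */
	if (fd < 0)
		return fd;

	err = setup(fd);		/* may fail; fd still private */
	if (err) {
		put_unused_fd(fd);	/* safe: nothing installed yet */
		return err;
	}

	fd_install(fd, get_file(file));	/* point of no return */
	return fd;
}
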
index d18f0f0..4ccfc10 100644 (file)
@@ -786,7 +786,7 @@ int esp_input_done2(struct sk_buff *skb, int err)
 
                /*
                 * 1) if the NAT-T peer's IP or port changed then
-                *    advertize the change to the keying daemon.
+                *    advertise the change to the keying daemon.
                 *    This is an inbound SA, so just compare
                 *    SRC ports.
                 */
index d3456cf..3d3a24f 100644 (file)
@@ -927,10 +927,11 @@ int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
        return mss_now;
 }
 
-/* In some cases, both sendmsg() could have added an skb to the write queue,
- * but failed adding payload on it.  We need to remove it to consume less
+/* In some cases, sendmsg() could have added an skb to the write queue,
+ * but failed adding payload on it. We need to remove it to consume less
  * memory, but more importantly be able to generate EPOLLOUT for Edge Trigger
- * epoll() users.
+ * epoll() users. Another reason is that tcp_write_xmit() does not like
+ * finding an empty skb in the write queue.
  */
 void tcp_remove_empty_skb(struct sock *sk)
 {
@@ -1289,6 +1290,7 @@ new_segment:
 
 wait_for_space:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+               tcp_remove_empty_skb(sk);
                if (copied)
                        tcp_push(sk, flags & ~MSG_MORE, mss_now,
                                 TCP_NAGLE_PUSH, size_goal);
index 8afb095..804821d 100644 (file)
@@ -2207,16 +2207,17 @@ void tcp_enter_loss(struct sock *sk)
  * restore sanity to the SACK scoreboard. If the apparent reneging
  * persists until this RTO then we'll clear the SACK scoreboard.
  */
-static bool tcp_check_sack_reneging(struct sock *sk, int flag)
+static bool tcp_check_sack_reneging(struct sock *sk, int *ack_flag)
 {
-       if (flag & FLAG_SACK_RENEGING &&
-           flag & FLAG_SND_UNA_ADVANCED) {
+       if (*ack_flag & FLAG_SACK_RENEGING &&
+           *ack_flag & FLAG_SND_UNA_ADVANCED) {
                struct tcp_sock *tp = tcp_sk(sk);
                unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
                                          msecs_to_jiffies(10));
 
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          delay, TCP_RTO_MAX);
+               *ack_flag &= ~FLAG_SET_XMIT_TIMER;
                return true;
        }
        return false;
@@ -2986,7 +2987,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
                tp->prior_ssthresh = 0;
 
        /* B. In all the states check for reneging SACKs. */
-       if (tcp_check_sack_reneging(sk, flag))
+       if (tcp_check_sack_reneging(sk, ack_flag))
                return;
 
        /* C. Check consistency of the current state. */
index e023d29..2cc1a45 100644 (file)
@@ -833,7 +833,7 @@ int esp6_input_done2(struct sk_buff *skb, int err)
 
                /*
                 * 1) if the NAT-T peer's IP or port changed then
-                *    advertize the change to the keying daemon.
+                *    advertise the change to the keying daemon.
                 *    This is an inbound SA, so just compare
                 *    SRC ports.
                 */
index e751cda..8f6b6f5 100644 (file)
@@ -2468,8 +2468,7 @@ static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
 
                /* drop unicast public action frames when using MPF */
                if (is_unicast_ether_addr(mgmt->da) &&
-                   ieee80211_is_public_action((void *)rx->skb->data,
-                                              rx->skb->len))
+                   ieee80211_is_protected_dual_of_public_action(rx->skb))
                        return -EACCES;
        }
 
index 1d34d70..920a5a2 100644 (file)
@@ -316,12 +316,6 @@ void flow_offload_refresh(struct nf_flowtable *flow_table,
 }
 EXPORT_SYMBOL_GPL(flow_offload_refresh);
 
-static bool nf_flow_is_outdated(const struct flow_offload *flow)
-{
-       return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
-               !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
-}
-
 static inline bool nf_flow_has_expired(const struct flow_offload *flow)
 {
        return nf_flow_timeout_delta(flow->timeout) <= 0;
@@ -407,12 +401,18 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
        return err;
 }
 
+static bool nf_flow_custom_gc(struct nf_flowtable *flow_table,
+                             const struct flow_offload *flow)
+{
+       return flow_table->type->gc && flow_table->type->gc(flow);
+}
+
 static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
                                    struct flow_offload *flow, void *data)
 {
        if (nf_flow_has_expired(flow) ||
            nf_ct_is_dying(flow->ct) ||
-           nf_flow_is_outdated(flow))
+           nf_flow_custom_gc(flow_table, flow))
                flow_offload_teardown(flow);
 
        if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
index 7c652d1..fb52d6f 100644 (file)
@@ -278,7 +278,16 @@ err_nat:
        return err;
 }
 
+static bool tcf_ct_flow_is_outdated(const struct flow_offload *flow)
+{
+       return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
+              test_bit(IPS_HW_OFFLOAD_BIT, &flow->ct->status) &&
+              !test_bit(NF_FLOW_HW_PENDING, &flow->flags) &&
+              !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
+}
+
 static struct nf_flowtable_type flowtable_ct = {
+       .gc             = tcf_ct_flow_is_outdated,
        .action         = tcf_ct_flow_table_fill_actions,
        .owner          = THIS_MODULE,
 };
index e95df84..b80bf68 100644 (file)
@@ -555,6 +555,11 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
 
        virtio_device_ready(vdev);
 
+       return 0;
+}
+
+static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
+{
        mutex_lock(&vsock->tx_lock);
        vsock->tx_run = true;
        mutex_unlock(&vsock->tx_lock);
@@ -569,7 +574,16 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
        vsock->event_run = true;
        mutex_unlock(&vsock->event_lock);
 
-       return 0;
+       /* virtio_transport_send_pkt() can queue packets once
+        * the_virtio_vsock is set, but they won't be processed until
+        * vsock->tx_run is set to true. We queue vsock->send_pkt_work
+        * when initialization finishes to send those packets queued
+        * earlier.
+        * We don't need to queue the other workers (rx, event) because
+        * as long as we don't fill the queues with empty buffers, the
+        * host can't send us any notification.
+        */
+       queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
 }
 
 static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
@@ -664,6 +678,7 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
                goto out;
 
        rcu_assign_pointer(the_virtio_vsock, vsock);
+       virtio_vsock_vqs_start(vsock);
 
        mutex_unlock(&the_virtio_vsock_mutex);
 
@@ -736,6 +751,7 @@ static int virtio_vsock_restore(struct virtio_device *vdev)
                goto out;
 
        rcu_assign_pointer(the_virtio_vsock, vsock);
+       virtio_vsock_vqs_start(vsock);
 
 out:
        mutex_unlock(&the_virtio_vsock_mutex);
index 3e2c398..55a1d36 100644 (file)
@@ -43,10 +43,11 @@ void cfg80211_rx_assoc_resp(struct net_device *dev,
 
        for (link_id = 0; link_id < ARRAY_SIZE(data->links); link_id++) {
                cr.links[link_id].status = data->links[link_id].status;
+               cr.links[link_id].bss = data->links[link_id].bss;
+
                WARN_ON_ONCE(cr.links[link_id].status != WLAN_STATUS_SUCCESS &&
                             (!cr.ap_mld_addr || !cr.links[link_id].bss));
 
-               cr.links[link_id].bss = data->links[link_id].bss;
                if (!cr.links[link_id].bss)
                        continue;
                cr.links[link_id].bssid = data->links[link_id].bss->bssid;
index 939deec..8210a60 100644 (file)
@@ -2125,7 +2125,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
        if (!res)
                goto drop;
 
-       rdev_inform_bss(rdev, &res->pub, ies, data->drv_data);
+       rdev_inform_bss(rdev, &res->pub, ies, drv_data->drv_data);
 
        if (data->bss_source == BSS_SOURCE_MBSSID) {
                /* this is a nontransmitting bss, we need to add it to
diff --git a/tools/include/linux/rwsem.h b/tools/include/linux/rwsem.h
new file mode 100644 (file)
index 0000000..83971b3
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _TOOLS__RWSEM_H
+#define _TOOLS__RWSEM_H
+
+#include <pthread.h>
+
+struct rw_semaphore {
+       pthread_rwlock_t lock;
+};
+
+static inline int init_rwsem(struct rw_semaphore *sem)
+{
+       return pthread_rwlock_init(&sem->lock, NULL);
+}
+
+static inline int exit_rwsem(struct rw_semaphore *sem)
+{
+       return pthread_rwlock_destroy(&sem->lock);
+}
+
+static inline int down_read(struct rw_semaphore *sem)
+{
+       return pthread_rwlock_rdlock(&sem->lock);
+}
+
+static inline int up_read(struct rw_semaphore *sem)
+{
+       return pthread_rwlock_unlock(&sem->lock);
+}
+
+static inline int down_write(struct rw_semaphore *sem)
+{
+       return pthread_rwlock_wrlock(&sem->lock);
+}
+
+static inline int up_write(struct rw_semaphore *sem)
+{
+       return pthread_rwlock_unlock(&sem->lock);
+}
+#endif /* _TOOLS__RWSEM_H */
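
A usage sketch for the shim above: kernel-style locking calls in shared test code (such as the maple tree test updates earlier in this diff) compile unchanged in userspace, backed by a pthread rwlock:

#include <linux/rwsem.h>	/* the tools/include wrapper above */

static struct rw_semaphore demo_sem;
static int demo_value;

static void demo(void)
{
	init_rwsem(&demo_sem);

	down_write(&demo_sem);
	demo_value = 42;	/* exclusive writer */
	up_write(&demo_sem);

	down_read(&demo_sem);
	/* any number of readers may hold the lock here concurrently */
	up_read(&demo_sem);

	exit_rwsem(&demo_sem);
}
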
index 64415b9..28c26a0 100644 (file)
@@ -167,7 +167,9 @@ void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_
        __asm__ volatile (
                "xor  %ebp, %ebp\n"       /* zero the stack frame                                */
                "mov  %esp, %eax\n"       /* save stack pointer to %eax, as arg1 of _start_c     */
-               "and  $-16, %esp\n"       /* last pushed argument must be 16-byte aligned        */
+               "add  $12, %esp\n"        /* avoid over-estimating after the 'and' & 'sub' below */
+               "and  $-16, %esp\n"       /* the %esp must be 16-byte aligned on 'call'          */
+               "sub  $12, %esp\n"        /* sub 12 to keep it aligned after the push %eax       */
                "push %eax\n"             /* push arg1 on stack to support plain stack modes too */
                "call _start_c\n"         /* transfer to c runtime                               */
                "hlt\n"                   /* ensure it does not return                           */
index a5f33fe..a05655b 100644 (file)
@@ -13,6 +13,7 @@ const unsigned long *_auxv __attribute__((weak));
 static void __stack_chk_init(void);
 static void exit(int);
 
+__attribute__((weak))
 void _start_c(long *sp)
 {
        long argc;
index ca23598..a06e73e 100644 (file)
@@ -7,6 +7,7 @@
  */
 #define _GNU_SOURCE
 #include <sys/mman.h>
+#include <linux/mman.h>
 #include <errno.h>
 #include <stdio.h>
 #include <stdlib.h>
index 834a90b..822ecaa 100644 (file)
@@ -24,11 +24,23 @@ enum dma_data_direction {
 #define dma_map_page(d, p, o, s, dir) (page_to_phys(p) + (o))
 
 #define dma_map_single(d, p, s, dir) (virt_to_phys(p))
+#define dma_map_single_attrs(d, p, s, dir, a) (virt_to_phys(p))
 #define dma_mapping_error(...) (0)
 
 #define dma_unmap_single(d, a, s, r) do { (void)(d); (void)(a); (void)(s); (void)(r); } while (0)
 #define dma_unmap_page(d, a, s, r) do { (void)(d); (void)(a); (void)(s); (void)(r); } while (0)
 
+#define sg_dma_address(sg) (0)
+#define dma_need_sync(v, a) (0)
+#define dma_unmap_single_attrs(d, a, s, r, t) do { \
+       (void)(d); (void)(a); (void)(s); (void)(r); (void)(t); \
+} while (0)
+#define dma_sync_single_range_for_cpu(d, a, o, s, r) do { \
+       (void)(d); (void)(a); (void)(o); (void)(s); (void)(r); \
+} while (0)
+#define dma_sync_single_range_for_device(d, a, o, s, r) do { \
+       (void)(d); (void)(a); (void)(o); (void)(s); (void)(r); \
+} while (0)
 #define dma_max_mapping_size(...) SIZE_MAX
 
 #endif