Merge tag 'parisc-for-6.5-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/delle...
authorLinus Torvalds <torvalds@linux-foundation.org>
Sat, 5 Aug 2023 20:09:05 +0000 (13:09 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 5 Aug 2023 20:09:05 +0000 (13:09 -0700)
Pull parisc architecture fixes from Helge Deller:

 - early fixmap preallocation to fix boot failures on kernel >= 6.4

 - remove DMA leftover code in parport_gsc

 - drop old comments and code style fixes

* tag 'parisc-for-6.5-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: unaligned: Add required spaces after ','
  parport: gsc: remove DMA leftover code
  parisc: pci-dma: remove unused and dead EISA code and comment
  parisc/mm: preallocate fixmap page tables at init

238 files changed:
Documentation/ABI/testing/sysfs-bus-cxl
Documentation/admin-guide/kdump/vmcoreinfo.rst
Documentation/devicetree/bindings/net/mediatek,net.yaml
Documentation/devicetree/bindings/net/rockchip-dwmac.yaml
Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml
MAINTAINERS
arch/arm/boot/dts/microchip/sam9x60.dtsi
arch/arm/boot/dts/nspire/nspire.dtsi
arch/arm/boot/dts/nxp/imx/imx53-sk-imx53.dts
arch/arm/boot/dts/nxp/imx/imx6sll.dtsi
arch/arm/mach-pxa/sharpsl_pm.h
arch/arm/mach-pxa/spitz_pm.c
arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
arch/arm64/boot/dts/arm/vexpress-v2m-rs1.dtsi [deleted symlink]
arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts
arch/arm64/boot/dts/freescale/imx8mm-phycore-som.dtsi
arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts
arch/arm64/boot/dts/freescale/imx8mm-venice-gw7904.dts
arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
arch/arm64/boot/dts/freescale/imx8mq.dtsi
arch/arm64/boot/dts/renesas/r9a07g044.dtsi
arch/arm64/boot/dts/renesas/r9a07g054.dtsi
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/ptrace.c
arch/powerpc/include/asm/word-at-a-time.h
arch/riscv/include/asm/acpi.h
arch/riscv/kernel/acpi.c
arch/riscv/kernel/crash_core.c
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/include/uapi/asm/ptrace.h
arch/s390/kernel/sthyi.c
arch/s390/kvm/intercept.c
arch/s390/mm/vmem.c
arch/x86/hyperv/hv_apic.c
arch/x86/hyperv/hv_init.c
arch/x86/hyperv/hv_vtl.c
arch/x86/hyperv/ivm.c
arch/x86/hyperv/mmu.c
arch/x86/hyperv/nested.c
arch/x86/include/asm/mshyperv.h
drivers/block/rbd.c
drivers/clk/Kconfig
drivers/clk/imx/clk-imx93.c
drivers/clk/mediatek/clk-mt8183.c
drivers/clk/meson/clk-pll.c
drivers/cxl/core/mbox.c
drivers/cxl/core/memdev.c
drivers/cxl/cxlmem.h
drivers/firmware/arm_scmi/mailbox.c
drivers/firmware/arm_scmi/raw_mode.c
drivers/firmware/arm_scmi/smc.c
drivers/firmware/smccc/soc_id.c
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
drivers/gpu/drm/i915/gt/gen8_engine_cs.h
drivers/gpu/drm/i915/gt/intel_gpu_commands.h
drivers/gpu/drm/i915/gt/intel_gt_regs.h
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gvt/edid.c
drivers/gpu/drm/i915/i915_active.c
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/hv/connection.c
drivers/hv/hv_balloon.c
drivers/hv/hv_common.c
drivers/isdn/hardware/mISDN/hfcpci.c
drivers/memory/tegra/mc.c
drivers/memory/tegra/tegra194.c
drivers/memory/tegra/tegra234.c
drivers/mtd/nand/raw/fsl_upm.c
drivers/mtd/nand/raw/meson_nand.c
drivers/mtd/nand/raw/omap_elm.c
drivers/mtd/nand/raw/rockchip-nand-controller.c
drivers/mtd/nand/spi/toshiba.c
drivers/mtd/nand/spi/winbond.c
drivers/mtd/spi-nor/spansion.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/microchip/ksz_common.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/korina.c
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
drivers/net/ethernet/marvell/prestera/prestera_pci.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_debugfs.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
drivers/net/ethernet/qlogic/qed/qed_dev_api.h
drivers/net/ethernet/qlogic/qed/qed_fcoe.c
drivers/net/ethernet/qlogic/qed/qed_fcoe.h
drivers/net/ethernet/qlogic/qed/qed_hw.c
drivers/net/ethernet/qlogic/qed/qed_iscsi.c
drivers/net/ethernet/qlogic/qed/qed_iscsi.h
drivers/net/ethernet/qlogic/qed/qed_l2.c
drivers/net/ethernet/qlogic/qed/qed_l2.h
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/sfc/falcon/selftest.c
drivers/net/ethernet/sfc/selftest.c
drivers/net/ethernet/sfc/siena/selftest.c
drivers/net/ethernet/socionext/netsec.c
drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/tap.c
drivers/net/tun.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/zaurus.c
drivers/net/wireless/ath/ath11k/ahb.c
drivers/net/wireless/ath/ath11k/pcic.c
drivers/net/wireless/ath/ath6kl/Makefile
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
drivers/net/wireless/legacy/rayctl.h
drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
drivers/powercap/intel_rapl_common.c
drivers/powercap/intel_rapl_msr.c
drivers/powercap/intel_rapl_tpmi.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/scsi/zfcp_fc.c
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/storvsc_drv.c
drivers/soc/imx/imx8mp-blk-ctrl.c
drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/super.c
fs/erofs/super.c
fs/erofs/zdata.c
fs/exfat/balloc.c
fs/exfat/dir.c
fs/file.c
fs/nfsd/vfs.c
include/asm-generic/mshyperv.h
include/asm-generic/word-at-a-time.h
include/linux/cpumask.h
include/linux/hyperv.h
include/linux/intel_rapl.h
include/linux/spi/corgi_lcd.h
include/linux/spi/spi-mem.h
include/net/gro.h
include/net/inet_sock.h
include/net/ip.h
include/net/route.h
include/net/vxlan.h
include/soc/tegra/mc.h
include/uapi/linux/pkt_cls.h
kernel/bpf/cpumap.c
kernel/trace/bpf_trace.c
lib/Makefile
lib/cpumask.c
lib/test_bitmap.c
net/can/raw.c
net/ceph/osd_client.c
net/core/bpf_sk_storage.c
net/core/rtnetlink.c
net/core/sock.c
net/core/sock_map.c
net/dcb/dcbnl.c
net/dccp/ipv6.c
net/dsa/port.c
net/ipv4/inet_diag.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_metrics.c
net/ipv4/udp.c
net/ipv4/udp_offload.c
net/ipv6/ip6mr.c
net/ipv6/ping.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/udp_offload.c
net/l2tp/l2tp_ip6.c
net/mptcp/sockopt.c
net/netfilter/nft_socket.c
net/netfilter/xt_socket.c
net/packet/af_packet.c
net/sched/cls_flower.c
net/sched/cls_fw.c
net/sched/cls_route.c
net/sched/cls_u32.c
net/sched/em_meta.c
net/sched/sch_taprio.c
net/smc/af_smc.c
net/unix/af_unix.c
net/wireless/scan.c
net/xdp/xsk.c
net/xfrm/xfrm_policy.c
tools/hv/vmbus_testing
tools/perf/arch/arm64/util/pmu.c
tools/perf/arch/powerpc/util/skip-callchain-idx.c
tools/perf/tests/parse-events.c
tools/perf/tests/shell/test_uprobe_from_different_cu.sh
tools/perf/util/parse-events.c
tools/perf/util/pmu.c
tools/perf/util/pmu.h
tools/perf/util/pmus.c
tools/testing/selftests/net/so_incoming_cpu.c
tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c
tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
tools/testing/vsock/Makefile

index 6350dd8..087f762 100644 (file)
@@ -82,7 +82,12 @@ Description:
                whether it resides in persistent capacity, volatile capacity,
                or the LSA, is made permanently unavailable by whatever means
                is appropriate for the media type. This functionality requires
-               the device to be not be actively decoding any HPA ranges.
+               the device to be disabled, that is, not actively decoding any
+               HPA ranges. This permits avoiding explicit global CPU cache
+               management, relying instead for it to be done when a region
+               transitions between software programmed and hardware committed
+               states. If this file is not present, then there is no hardware
+               support for the operation.
 
 
 What            /sys/bus/cxl/devices/memX/security/erase
@@ -92,7 +97,13 @@ Contact:        linux-cxl@vger.kernel.org
 Description:
                (WO) Write a boolean 'true' string value to this attribute to
                secure erase user data by changing the media encryption keys for
-               all user data areas of the device.
+               all user data areas of the device. This functionality requires
+               the device to be disabled, that is, not actively decoding any
+               HPA ranges. This permits avoiding explicit global CPU cache
+               management, relying instead for it to be done when a region
+               transitions between software programmed and hardware committed
+               states. If this file is not present, then there is no hardware
+               support for the operation.
 
 
 What:          /sys/bus/cxl/devices/memX/firmware/
index c18d94f..f8ebb63 100644 (file)
@@ -624,3 +624,9 @@ Used to get the correct ranges:
   * VMALLOC_START ~ VMALLOC_END : vmalloc() / ioremap() space.
   * VMEMMAP_START ~ VMEMMAP_END : vmemmap space, used for struct page array.
   * KERNEL_LINK_ADDR : start address of Kernel link and BPF
+
+va_kernel_pa_offset
+-------------------
+
+Indicates the offset between the kernel virtual and physical mappings.
+Used to translate virtual to physical addresses.
index acb2b2a..31cc0c4 100644 (file)
@@ -293,7 +293,7 @@ allOf:
 patternProperties:
   "^mac@[0-1]$":
     type: object
-    additionalProperties: false
+    unevaluatedProperties: false
     allOf:
       - $ref: ethernet-controller.yaml#
     description:
@@ -305,14 +305,9 @@ patternProperties:
       reg:
         maxItems: 1
 
-      phy-handle: true
-
-      phy-mode: true
-
     required:
       - reg
       - compatible
-      - phy-handle
 
 required:
   - compatible
index 176ea5f..7f324c6 100644 (file)
@@ -91,12 +91,18 @@ properties:
     $ref: /schemas/types.yaml#/definitions/phandle
 
   tx_delay:
-    description: Delay value for TXD timing. Range value is 0~0x7F, 0x30 as default.
+    description: Delay value for TXD timing.
     $ref: /schemas/types.yaml#/definitions/uint32
+    minimum: 0
+    maximum: 0x7F
+    default: 0x30
 
   rx_delay:
-    description: Delay value for RXD timing. Range value is 0~0x7F, 0x10 as default.
+    description: Delay value for RXD timing.
     $ref: /schemas/types.yaml#/definitions/uint32
+    minimum: 0
+    maximum: 0x7F
+    default: 0x10
 
   phy-supply:
     description: PHY regulator
index 30b2131..65cb2e5 100644 (file)
@@ -16,7 +16,6 @@ properties:
       - enum:
           - atmel,at91rm9200-usart
           - atmel,at91sam9260-usart
-          - microchip,sam9x60-usart
       - items:
           - const: atmel,at91rm9200-dbgu
           - const: atmel,at91rm9200-usart
@@ -24,6 +23,9 @@ properties:
           - const: atmel,at91sam9260-dbgu
           - const: atmel,at91sam9260-usart
       - items:
+          - const: microchip,sam9x60-usart
+          - const: atmel,at91sam9260-usart
+      - items:
           - const: microchip,sam9x60-dbgu
           - const: microchip,sam9x60-usart
           - const: atmel,at91sam9260-dbgu
index 53b7ca8..0f966f0 100644 (file)
@@ -3262,9 +3262,8 @@ F:        Documentation/devicetree/bindings/input/atmel,maxtouch.yaml
 F:     drivers/input/touchscreen/atmel_mxt_ts.c
 
 ATMEL WIRELESS DRIVER
-M:     Simon Kelley <simon@thekelleys.org.uk>
 L:     linux-wireless@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     http://www.thekelleys.org.uk/atmel
 W:     http://atmelwlandriver.sourceforge.net/
 F:     drivers/net/wireless/atmel/atmel*
@@ -3394,7 +3393,7 @@ F:        drivers/media/radio/radio-aztech*
 B43 WIRELESS DRIVER
 L:     linux-wireless@vger.kernel.org
 L:     b43-dev@lists.infradead.org
-S:     Odd Fixes
+S:     Orphan
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/b43
 F:     drivers/net/wireless/broadcom/b43/
 
@@ -5462,8 +5461,7 @@ F:        Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml
 F:     drivers/net/can/ctucanfd/
 
 CW1200 WLAN driver
-M:     Solomon Peachy <pizza@shaftnet.org>
-S:     Maintained
+S:     Orphan
 F:     drivers/net/wireless/st/cw1200/
 
 CX18 VIDEO4LINUX DRIVER
@@ -9662,6 +9660,7 @@ F:        tools/hv/
 
 HYPERBUS SUPPORT
 M:     Vignesh Raghavendra <vigneshr@ti.com>
+R:     Tudor Ambarus <tudor.ambarus@linaro.org>
 L:     linux-mtd@lists.infradead.org
 S:     Supported
 Q:     http://patchwork.ozlabs.org/project/linux-mtd/list/
@@ -12592,18 +12591,14 @@ F:    Documentation/devicetree/bindings/net/marvell,pp2.yaml
 F:     drivers/net/ethernet/marvell/mvpp2/
 
 MARVELL MWIFIEX WIRELESS DRIVER
-M:     Amitkumar Karwar <amitkarwar@gmail.com>
-M:     Ganapathi Bhat <ganapathi017@gmail.com>
-M:     Sharvari Harisangam <sharvari.harisangam@nxp.com>
-M:     Xinming Hu <huxinming820@gmail.com>
+M:     Brian Norris <briannorris@chromium.org>
 L:     linux-wireless@vger.kernel.org
-S:     Maintained
+S:     Odd Fixes
 F:     drivers/net/wireless/marvell/mwifiex/
 
 MARVELL MWL8K WIRELESS DRIVER
-M:     Lennert Buytenhek <buytenh@wantstofly.org>
 L:     linux-wireless@vger.kernel.org
-S:     Odd Fixes
+S:     Orphan
 F:     drivers/net/wireless/marvell/mwl8k.c
 
 MARVELL NAND CONTROLLER DRIVER
@@ -17449,6 +17444,7 @@ F:      drivers/media/tuners/qt1010*
 
 QUALCOMM ATH12K WIRELESS DRIVER
 M:     Kalle Valo <kvalo@kernel.org>
+M:     Jeff Johnson <quic_jjohnson@quicinc.com>
 L:     ath12k@lists.infradead.org
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
@@ -17456,6 +17452,7 @@ F:      drivers/net/wireless/ath/ath12k/
 
 QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
 M:     Kalle Valo <kvalo@kernel.org>
+M:     Jeff Johnson <quic_jjohnson@quicinc.com>
 L:     ath10k@lists.infradead.org
 S:     Supported
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/ath10k
@@ -17465,6 +17462,7 @@ F:      drivers/net/wireless/ath/ath10k/
 
 QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
 M:     Kalle Valo <kvalo@kernel.org>
+M:     Jeff Johnson <quic_jjohnson@quicinc.com>
 L:     ath11k@lists.infradead.org
 S:     Supported
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/ath11k
@@ -17985,7 +17983,7 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.g
 F:     drivers/net/wireless/realtek/rtlwifi/
 
 REALTEK WIRELESS DRIVER (rtw88)
-M:     Yan-Hsuan Chuang <tony0620emma@gmail.com>
+M:     Ping-Ke Shih <pkshih@realtek.com>
 L:     linux-wireless@vger.kernel.org
 S:     Maintained
 F:     drivers/net/wireless/realtek/rtw88/
@@ -20404,7 +20402,6 @@ F:      drivers/pwm/pwm-stm32*
 F:     include/linux/*/stm32-*tim*
 
 STMMAC ETHERNET DRIVER
-M:     Giuseppe Cavallaro <peppe.cavallaro@st.com>
 M:     Alexandre Torgue <alexandre.torgue@foss.st.com>
 M:     Jose Abreu <joabreu@synopsys.com>
 L:     netdev@vger.kernel.org
@@ -21681,11 +21678,14 @@ S:    Orphan
 F:     drivers/net/ethernet/dec/tulip/
 
 TUN/TAP driver
-M:     Maxim Krasnyansky <maxk@qti.qualcomm.com>
+M:     Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+M:     Jason Wang <jasowang@redhat.com>
 S:     Maintained
 W:     http://vtun.sourceforge.net/tun
 F:     Documentation/networking/tuntap.rst
 F:     arch/um/os-Linux/drivers/
+F:     drivers/net/tap.c
+F:     drivers/net/tun.c
 
 TURBOCHANNEL SUBSYSTEM
 M:     "Maciej W. Rozycki" <macro@orcam.me.uk>
@@ -21908,9 +21908,8 @@ S:      Maintained
 F:     drivers/usb/misc/apple-mfi-fastcharge.c
 
 USB AR5523 WIRELESS DRIVER
-M:     Pontus Fuchs <pontus.fuchs@gmail.com>
 L:     linux-wireless@vger.kernel.org
-S:     Maintained
+S:     Orphan
 F:     drivers/net/wireless/ath/ar5523/
 
 USB ATTACHED SCSI
@@ -22187,9 +22186,8 @@ F:      drivers/usb/gadget/legacy/webcam.c
 F:     include/uapi/linux/usb/g_uvc.h
 
 USB WIRELESS RNDIS DRIVER (rndis_wlan)
-M:     Jussi Kivilinna <jussi.kivilinna@iki.fi>
 L:     linux-wireless@vger.kernel.org
-S:     Maintained
+S:     Orphan
 F:     drivers/net/wireless/legacy/rndis_wlan.c
 
 USB XHCI DRIVER
@@ -22964,7 +22962,7 @@ F:      drivers/input/misc/wistron_btns.c
 
 WL3501 WIRELESS PCMCIA CARD DRIVER
 L:     linux-wireless@vger.kernel.org
-S:     Odd fixes
+S:     Orphan
 F:     drivers/net/wireless/legacy/wl3501*
 
 WMI BINARY MOF DRIVER
@@ -23535,11 +23533,8 @@ S:     Maintained
 F:     mm/zbud.c
 
 ZD1211RW WIRELESS DRIVER
-M:     Ulrich Kunitz <kune@deine-taler.de>
 L:     linux-wireless@vger.kernel.org
-L:     zd1211-devs@lists.sourceforge.net (subscribers-only)
-S:     Maintained
-W:     http://zd1211.ath.cx/wiki/DriverRewrite
+S:     Orphan
 F:     drivers/net/wireless/zydas/zd1211rw/
 
 ZD1301 MEDIA DRIVER
index 8b53997..73d570a 100644 (file)
                                status = "disabled";
 
                                uart4: serial@200 {
-                                       compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
+                                       compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart";
                                        reg = <0x200 0x200>;
                                        interrupts = <13 IRQ_TYPE_LEVEL_HIGH 7>;
                                        dmas = <&dma0
                                status = "disabled";
 
                                uart5: serial@200 {
-                                       compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
+                                       compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart";
                                        reg = <0x200 0x200>;
                                        atmel,usart-mode = <AT91_USART_MODE_SERIAL>;
                                        interrupts = <14 IRQ_TYPE_LEVEL_HIGH 7>;
                                status = "disabled";
 
                                uart11: serial@200 {
-                                       compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
+                                       compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart";
                                        reg = <0x200 0x200>;
                                        interrupts = <32 IRQ_TYPE_LEVEL_HIGH 7>;
                                        dmas = <&dma0
                                status = "disabled";
 
                                uart12: serial@200 {
-                                       compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
+                                       compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart";
                                        reg = <0x200 0x200>;
                                        interrupts = <33 IRQ_TYPE_LEVEL_HIGH 7>;
                                        dmas = <&dma0
                                status = "disabled";
 
                                uart6: serial@200 {
-                                       compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
+                                       compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart";
                                        reg = <0x200 0x200>;
                                        interrupts = <9 IRQ_TYPE_LEVEL_HIGH 7>;
                                        dmas = <&dma0
                                status = "disabled";
 
                                uart7: serial@200 {
-                                       compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
+                                       compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart";
                                        reg = <0x200 0x200>;
                                        interrupts = <10 IRQ_TYPE_LEVEL_HIGH 7>;
                                        dmas = <&dma0
                                status = "disabled";
 
                                uart8: serial@200 {
-                                       compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
+                                       compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart";
                                        reg = <0x200 0x200>;
                                        interrupts = <11 IRQ_TYPE_LEVEL_HIGH 7>;
                                        dmas = <&dma0
                                status = "disabled";
 
                                uart0: serial@200 {
-                                       compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
+                                       compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart";
                                        reg = <0x200 0x200>;
                                        interrupts = <5 IRQ_TYPE_LEVEL_HIGH 7>;
                                        dmas = <&dma0
                                status = "disabled";
 
                                uart1: serial@200 {
-                                       compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
+                                       compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart";
                                        reg = <0x200 0x200>;
                                        interrupts = <6 IRQ_TYPE_LEVEL_HIGH 7>;
                                        dmas = <&dma0
                                status = "disabled";
 
                                uart2: serial@200 {
-                                       compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
+                                       compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart";
                                        reg = <0x200 0x200>;
                                        interrupts = <7 IRQ_TYPE_LEVEL_HIGH 7>;
                                        dmas = <&dma0
                                status = "disabled";
 
                                uart3: serial@200 {
-                                       compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
+                                       compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart";
                                        reg = <0x200 0x200>;
                                        interrupts = <8 IRQ_TYPE_LEVEL_HIGH 7>;
                                        dmas = <&dma0
                                status = "disabled";
 
                                uart9: serial@200 {
-                                       compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
+                                       compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart";
                                        reg = <0x200 0x200>;
                                        interrupts = <15 IRQ_TYPE_LEVEL_HIGH 7>;
                                        dmas = <&dma0
                                status = "disabled";
 
                                uart10: serial@200 {
-                                       compatible = "microchip,sam9x60-dbgu", "microchip,sam9x60-usart", "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
+                                       compatible = "microchip,sam9x60-usart", "atmel,at91sam9260-usart";
                                        reg = <0x200 0x200>;
                                        interrupts = <16 IRQ_TYPE_LEVEL_HIGH 7>;
                                        dmas = <&dma0
index bb240e6..088bcc3 100644 (file)
                        };
 
                        watchdog: watchdog@90060000 {
-                               compatible = "arm,amba-primecell";
+                               compatible = "arm,primecell";
                                reg = <0x90060000 0x1000>;
                                interrupts = <3>;
                        };
index 103e731..1a00d29 100644 (file)
        status = "okay";
 };
 
+&cpu0 {
+       /* CPU rated to 800 MHz, not the default 1.2GHz. */
+       operating-points = <
+               /* kHz   uV */
+               166666  850000
+               400000  900000
+               800000  1050000
+       >;
+};
+
 &ecspi1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_ecspi1>;
index 2873369..3659fd5 100644 (file)
                                reg = <0x020ca000 0x1000>;
                                interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks IMX6SLL_CLK_USBPHY2>;
-                               phy-reg_3p0-supply = <&reg_3p0>;
+                               phy-3p0-supply = <&reg_3p0>;
                                fsl,anatop = <&anatop>;
                        };
 
index 20e4cab..623167f 100644 (file)
@@ -105,5 +105,4 @@ void sharpsl_pm_led(int val);
 #define MAX1111_ACIN_VOLT   6u
 int sharpsl_pm_pxa_read_max1111(int channel);
 
-void corgi_lcd_limit_intensity(int limit);
 #endif
index 1c021ce..8bc4ea5 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/apm-emulation.h>
+#include <linux/spi/corgi_lcd.h>
 
 #include <asm/irq.h>
 #include <asm/mach-types.h>
index 38ae674..3037f58 100644 (file)
        status = "okay";
        clock-frequency = <100000>;
        i2c-sda-falling-time-ns = <890>;  /* hcnt */
-       i2c-sdl-falling-time-ns = <890>;  /* lcnt */
+       i2c-scl-falling-time-ns = <890>;  /* lcnt */
 
        pinctrl-names = "default", "gpio";
        pinctrl-0 = <&i2c1_pmx_func>;
index ede99dc..f4cf30b 100644 (file)
        status = "okay";
        clock-frequency = <100000>;
        i2c-sda-falling-time-ns = <890>;  /* hcnt */
-       i2c-sdl-falling-time-ns = <890>;  /* lcnt */
+       i2c-scl-falling-time-ns = <890>;  /* lcnt */
 
        adc@14 {
                compatible = "lltc,ltc2497";
diff --git a/arch/arm64/boot/dts/arm/vexpress-v2m-rs1.dtsi b/arch/arm64/boot/dts/arm/vexpress-v2m-rs1.dtsi
deleted file mode 120000 (symlink)
index 68fd0f8..0000000
+++ /dev/null
@@ -1 +0,0 @@
-../../../../arm/boot/dts/vexpress-v2m-rs1.dtsi
\ No newline at end of file
index 03e7679..479948f 100644 (file)
 };
 
 &gpio1 {
-       gpio-line-names = "nINT_ETHPHY", "LED_RED", "WDOG_INT", "X_RTC_INT",
+       gpio-line-names = "", "LED_RED", "WDOG_INT", "X_RTC_INT",
                "", "", "", "RESET_ETHPHY",
                "CAN_nINT", "CAN_EN", "nENABLE_FLATLINK", "",
                "USB_OTG_VBUS_EN", "", "LED_GREEN", "LED_BLUE";
index 92616bc..847f085 100644 (file)
 };
 
 &gpio1 {
-       gpio-line-names = "nINT_ETHPHY", "", "WDOG_INT", "X_RTC_INT",
+       gpio-line-names = "", "", "WDOG_INT", "X_RTC_INT",
                "", "", "", "RESET_ETHPHY",
                "", "", "nENABLE_FLATLINK";
 };
                                };
                        };
 
-                       reg_vdd_gpu: buck3 {
+                       reg_vdd_vpu: buck3 {
                                regulator-always-on;
                                regulator-boot-on;
                                regulator-max-microvolt = <1000000>;
index 6f26914..07b07dc 100644 (file)
        status = "okay";
 };
 
+&disp_blk_ctrl {
+       status = "disabled";
+};
+
 &pgc_mipi {
        status = "disabled";
 };
index 93088fa..d5b7168 100644 (file)
        status = "okay";
 };
 
+&disp_blk_ctrl {
+       status = "disabled";
+};
+
 &pgc_mipi {
        status = "disabled";
 };
index d3a6710..b8946ed 100644 (file)
                        MX8MN_IOMUXC_ENET_RXC_ENET1_RGMII_RXC           0x91
                        MX8MN_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL     0x91
                        MX8MN_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL     0x1f
-                       MX8MN_IOMUXC_GPIO1_IO09_GPIO1_IO9               0x19
+                       MX8MN_IOMUXC_GPIO1_IO09_GPIO1_IO9               0x159
                >;
        };
 
index 1a2d2c0..01eec42 100644 (file)
                                                                         <&clk IMX8MQ_SYS1_PLL_800M>,
                                                                         <&clk IMX8MQ_VPU_PLL>;
                                                assigned-clock-rates = <600000000>,
-                                                                      <600000000>,
+                                                                      <300000000>,
                                                                       <800000000>,
                                                                       <0>;
                                        };
index 232910e..66f68fc 100644 (file)
                                     <GIC_SPI 212 IRQ_TYPE_EDGE_RISING>,
                                     <GIC_SPI 213 IRQ_TYPE_EDGE_RISING>;
                        interrupt-names = "tgia0", "tgib0", "tgic0", "tgid0",
-                                         "tgiv0", "tgie0", "tgif0",
-                                         "tgia1", "tgib1", "tgiv1", "tgiu1",
-                                         "tgia2", "tgib2", "tgiv2", "tgiu2",
+                                         "tciv0", "tgie0", "tgif0",
+                                         "tgia1", "tgib1", "tciv1", "tciu1",
+                                         "tgia2", "tgib2", "tciv2", "tciu2",
                                          "tgia3", "tgib3", "tgic3", "tgid3",
-                                         "tgiv3",
+                                         "tciv3",
                                          "tgia4", "tgib4", "tgic4", "tgid4",
-                                         "tgiv4",
+                                         "tciv4",
                                          "tgiu5", "tgiv5", "tgiw5",
                                          "tgia6", "tgib6", "tgic6", "tgid6",
-                                         "tgiv6",
+                                         "tciv6",
                                          "tgia7", "tgib7", "tgic7", "tgid7",
-                                         "tgiv7",
+                                         "tciv7",
                                          "tgia8", "tgib8", "tgic8", "tgid8",
-                                         "tgiv8", "tgiu8";
+                                         "tciv8", "tciu8";
                        clocks = <&cpg CPG_MOD R9A07G044_MTU_X_MCK_MTU3>;
                        power-domains = <&cpg>;
                        resets = <&cpg R9A07G044_MTU_X_PRESET_MTU3>;
index 2eba3a8..1f1d481 100644 (file)
                                     <GIC_SPI 212 IRQ_TYPE_EDGE_RISING>,
                                     <GIC_SPI 213 IRQ_TYPE_EDGE_RISING>;
                        interrupt-names = "tgia0", "tgib0", "tgic0", "tgid0",
-                                         "tgiv0", "tgie0", "tgif0",
-                                         "tgia1", "tgib1", "tgiv1", "tgiu1",
-                                         "tgia2", "tgib2", "tgiv2", "tgiu2",
+                                         "tciv0", "tgie0", "tgif0",
+                                         "tgia1", "tgib1", "tciv1", "tciu1",
+                                         "tgia2", "tgib2", "tciv2", "tciu2",
                                          "tgia3", "tgib3", "tgic3", "tgid3",
-                                         "tgiv3",
+                                         "tciv3",
                                          "tgia4", "tgib4", "tgic4", "tgid4",
-                                         "tgiv4",
+                                         "tciv4",
                                          "tgiu5", "tgiv5", "tgiw5",
                                          "tgia6", "tgib6", "tgic6", "tgid6",
-                                         "tgiv6",
+                                         "tciv6",
                                          "tgia7", "tgib7", "tgic7", "tgid7",
-                                         "tgiv7",
+                                         "tciv7",
                                          "tgia8", "tgib8", "tgic8", "tgid8",
-                                         "tgiv8", "tgiu8";
+                                         "tciv8", "tciu8";
                        clocks = <&cpg CPG_MOD R9A07G054_MTU_X_MCK_MTU3>;
                        power-domains = <&cpg>;
                        resets = <&cpg R9A07G054_MTU_X_PRESET_MTU3>;
index 520b681..75c37b1 100644 (file)
@@ -679,7 +679,7 @@ static void fpsimd_to_sve(struct task_struct *task)
        void *sst = task->thread.sve_state;
        struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
 
-       if (!system_supports_sve())
+       if (!system_supports_sve() && !system_supports_sme())
                return;
 
        vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
@@ -705,7 +705,7 @@ static void sve_to_fpsimd(struct task_struct *task)
        unsigned int i;
        __uint128_t const *p;
 
-       if (!system_supports_sve())
+       if (!system_supports_sve() && !system_supports_sme())
                return;
 
        vl = thread_get_cur_vl(&task->thread);
@@ -835,7 +835,8 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
        void *sst = task->thread.sve_state;
        struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
 
-       if (!test_tsk_thread_flag(task, TIF_SVE))
+       if (!test_tsk_thread_flag(task, TIF_SVE) &&
+           !thread_sm_enabled(&task->thread))
                return;
 
        vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
@@ -909,7 +910,7 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
                         */
                        task->thread.svcr &= ~(SVCR_SM_MASK |
                                               SVCR_ZA_MASK);
-                       clear_thread_flag(TIF_SME);
+                       clear_tsk_thread_flag(task, TIF_SME);
                        free_sme = true;
                }
        }
index d7f4f0d..5b9b430 100644 (file)
@@ -932,11 +932,13 @@ static int sve_set_common(struct task_struct *target,
        /*
         * Ensure target->thread.sve_state is up to date with target's
         * FPSIMD regs, so that a short copyin leaves trailing
-        * registers unmodified.  Always enable SVE even if going into
-        * streaming mode.
+        * registers unmodified.  Only enable SVE if we are
+        * configuring normal SVE, a system with streaming SVE may not
+        * have normal SVE.
         */
        fpsimd_sync_to_sve(target);
-       set_tsk_thread_flag(target, TIF_SVE);
+       if (type == ARM64_VEC_SVE)
+               set_tsk_thread_flag(target, TIF_SVE);
        target->thread.fp_type = FP_STATE_SVE;
 
        BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
@@ -1180,6 +1182,8 @@ static int zt_set(struct task_struct *target,
        if (ret == 0)
                target->thread.svcr |= SVCR_ZA_MASK;
 
+       fpsimd_flush_task_state(target);
+
        return ret;
 }
 
index 46c31fb..30a12d2 100644 (file)
@@ -34,7 +34,7 @@ static inline long find_zero(unsigned long mask)
        return leading_zero_bits >> 3;
 }
 
-static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
 {
        unsigned long rhs = val | c->low_bits;
        *data = rhs;
index f71ce21..d5604d2 100644 (file)
@@ -19,7 +19,7 @@ typedef u64 phys_cpuid_t;
 #define PHYS_CPUID_INVALID INVALID_HARTID
 
 /* ACPI table mapping after acpi_permanent_mmap is set */
-void *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
 #define acpi_os_ioremap acpi_os_ioremap
 
 #define acpi_strict 1  /* No out-of-spec workarounds on RISC-V */
index 5ee03eb..56cb2c9 100644 (file)
@@ -215,9 +215,9 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
        early_iounmap(map, size);
 }
 
-void *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
 {
-       return memremap(phys, size, MEMREMAP_WB);
+       return (void __iomem *)memremap(phys, size, MEMREMAP_WB);
 }
 
 #ifdef CONFIG_PCI
index b351a3c..55f1d78 100644 (file)
@@ -18,4 +18,6 @@ void arch_crash_save_vmcoreinfo(void)
        vmcoreinfo_append_str("NUMBER(MODULES_END)=0x%lx\n", MODULES_END);
 #endif
        vmcoreinfo_append_str("NUMBER(KERNEL_LINK_ADDR)=0x%lx\n", KERNEL_LINK_ADDR);
+       vmcoreinfo_append_str("NUMBER(va_kernel_pa_offset)=0x%lx\n",
+                                               kernel_map.va_kernel_pa_offset);
 }
index d03d4cb..3a6a9a8 100644 (file)
@@ -116,7 +116,6 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
-CONFIG_NET_TC_SKB_EXT=y
 CONFIG_SMC=m
 CONFIG_SMC_DIAG=m
 CONFIG_INET=y
@@ -193,6 +192,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NETFILTER_XTABLES_COMPAT=y
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -379,6 +379,7 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_NET_ACT_GATE=m
+CONFIG_NET_TC_SKB_EXT=y
 CONFIG_DNS_RESOLVER=y
 CONFIG_OPENVSWITCH=m
 CONFIG_VSOCKETS=m
@@ -395,6 +396,7 @@ CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_SAFE=y
+# CONFIG_FW_LOADER is not set
 CONFIG_CONNECTOR=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=m
@@ -502,7 +504,6 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_GOOGLE is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_WANGXUN is not set
 # CONFIG_NET_VENDOR_LITEX is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
@@ -542,6 +543,7 @@ CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_TI is not set
 # CONFIG_NET_VENDOR_VERTEXCOM is not set
 # CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WANGXUN is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 # CONFIG_NET_VENDOR_XILINX is not set
 CONFIG_PPP=m
@@ -646,7 +648,6 @@ CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_TMPFS_INODE64=y
 CONFIG_HUGETLBFS=y
-CONFIG_CONFIGFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
@@ -690,7 +691,6 @@ CONFIG_HARDENED_USERCOPY=y
 CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_DISABLE=y
 CONFIG_SECURITY_LOCKDOWN_LSM=y
 CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
 CONFIG_SECURITY_LANDLOCK=y
@@ -744,7 +744,6 @@ CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_SM3_GENERIC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_WP512=m
@@ -844,6 +843,7 @@ CONFIG_PREEMPT_TRACER=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_USER_EVENTS=y
 CONFIG_HIST_TRIGGERS=y
 CONFIG_FTRACE_STARTUP_TEST=y
 # CONFIG_EVENT_TRACE_STARTUP_TEST is not set
@@ -866,6 +866,7 @@ CONFIG_FAIL_MAKE_REQUEST=y
 CONFIG_FAIL_IO_TIMEOUT=y
 CONFIG_FAIL_FUTEX=y
 CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_CONFIGFS=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LKDTM=m
 CONFIG_TEST_MIN_HEAP=y
index 1855759..b13a5a0 100644 (file)
@@ -107,7 +107,6 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
-CONFIG_NET_TC_SKB_EXT=y
 CONFIG_SMC=m
 CONFIG_SMC_DIAG=m
 CONFIG_INET=y
@@ -184,6 +183,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NETFILTER_XTABLES_COMPAT=y
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -369,6 +369,7 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_NET_ACT_GATE=m
+CONFIG_NET_TC_SKB_EXT=y
 CONFIG_DNS_RESOLVER=y
 CONFIG_OPENVSWITCH=m
 CONFIG_VSOCKETS=m
@@ -385,6 +386,7 @@ CONFIG_HOTPLUG_PCI_S390=y
 CONFIG_UEVENT_HELPER=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_SAFE=y
+# CONFIG_FW_LOADER is not set
 CONFIG_CONNECTOR=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=m
@@ -492,7 +494,6 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_GOOGLE is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_WANGXUN is not set
 # CONFIG_NET_VENDOR_LITEX is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
@@ -532,6 +533,7 @@ CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_TI is not set
 # CONFIG_NET_VENDOR_VERTEXCOM is not set
 # CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WANGXUN is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 # CONFIG_NET_VENDOR_XILINX is not set
 CONFIG_PPP=m
@@ -673,7 +675,6 @@ CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_DISABLE=y
 CONFIG_SECURITY_LOCKDOWN_LSM=y
 CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
 CONFIG_SECURITY_LANDLOCK=y
@@ -729,7 +730,6 @@ CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_SM3_GENERIC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_WP512=m
@@ -793,6 +793,7 @@ CONFIG_STACK_TRACER=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_USER_EVENTS=y
 CONFIG_HIST_TRIGGERS=y
 CONFIG_SAMPLES=y
 CONFIG_SAMPLE_TRACE_PRINTK=m
index 6f68b39..e62fb20 100644 (file)
@@ -53,7 +53,6 @@ CONFIG_ZFCP=y
 # CONFIG_HVC_IUCV is not set
 # CONFIG_HW_RANDOM_S390 is not set
 # CONFIG_HMC_DRV is not set
-# CONFIG_S390_UV_UAPI is not set
 # CONFIG_S390_TAPE is not set
 # CONFIG_VMCP is not set
 # CONFIG_MONWRITER is not set
index f0fe3bc..bb08260 100644 (file)
@@ -8,6 +8,8 @@
 #ifndef _UAPI_S390_PTRACE_H
 #define _UAPI_S390_PTRACE_H
 
+#include <linux/const.h>
+
 /*
  * Offsets in the user_regs_struct. They are used for the ptrace
  * system call and in entry.S
index 4d141e2..2ea7f20 100644 (file)
@@ -459,9 +459,9 @@ static int sthyi_update_cache(u64 *rc)
  *
  * Fills the destination with system information returned by the STHYI
  * instruction. The data is generated by emulation or execution of STHYI,
- * if available. The return value is the condition code that would be
- * returned, the rc parameter is the return code which is passed in
- * register R2 + 1.
+ * if available. The return value is either a negative error value or
+ * the condition code that would be returned, the rc parameter is the
+ * return code which is passed in register R2 + 1.
  */
 int sthyi_fill(void *dst, u64 *rc)
 {
index 954d39a..341abaf 100644 (file)
@@ -389,8 +389,8 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
  */
 int handle_sthyi(struct kvm_vcpu *vcpu)
 {
-       int reg1, reg2, r = 0;
-       u64 code, addr, cc = 0, rc = 0;
+       int reg1, reg2, cc = 0, r = 0;
+       u64 code, addr, rc = 0;
        struct sthyi_sctns *sctns = NULL;
 
        if (!test_kvm_facility(vcpu->kvm, 74))
@@ -421,7 +421,10 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
                return -ENOMEM;
 
        cc = sthyi_fill(sctns, &rc);
-
+       if (cc < 0) {
+               free_page((unsigned long)sctns);
+               return cc;
+       }
 out:
        if (!cc) {
                if (kvm_s390_pv_cpu_is_protected(vcpu)) {
index b266492..24a6667 100644 (file)
@@ -763,6 +763,8 @@ void __init vmem_map_init(void)
        if (static_key_enabled(&cpu_has_bear))
                set_memory_nx(0, 1);
        set_memory_nx(PAGE_SIZE, 1);
+       if (debug_pagealloc_enabled())
+               set_memory_4k(0, ident_map_size >> PAGE_SHIFT);
 
        pr_info("Write protected kernel read-only data: %luk\n",
                (unsigned long)(__end_rodata - _stext) >> 10);
index 1fbda2f..b21335e 100644 (file)
@@ -107,7 +107,6 @@ static bool cpu_is_self(int cpu)
 static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
                bool exclude_self)
 {
-       struct hv_send_ipi_ex **arg;
        struct hv_send_ipi_ex *ipi_arg;
        unsigned long flags;
        int nr_bank = 0;
@@ -117,9 +116,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
                return false;
 
        local_irq_save(flags);
-       arg = (struct hv_send_ipi_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
+       ipi_arg = *this_cpu_ptr(hyperv_pcpu_input_arg);
 
-       ipi_arg = *arg;
        if (unlikely(!ipi_arg))
                goto ipi_mask_ex_done;
 
index 6c04b52..953e280 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/apic.h>
 #include <asm/desc.h>
 #include <asm/sev.h>
+#include <asm/ibt.h>
 #include <asm/hypervisor.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/mshyperv.h>
@@ -472,6 +473,26 @@ void __init hyperv_init(void)
        }
 
        /*
+        * Some versions of Hyper-V that provide IBT in guest VMs have a bug
+        * in that there's no ENDBR64 instruction at the entry to the
+        * hypercall page. Because hypercalls are invoked via an indirect call
+        * to the hypercall page, all hypercall attempts fail when IBT is
+        * enabled, and Linux panics. For such buggy versions, disable IBT.
+        *
+        * Fixed versions of Hyper-V always provide ENDBR64 on the hypercall
+        * page, so if future Linux kernel versions enable IBT for 32-bit
+        * builds, additional hypercall page hackery will be required here
+        * to provide an ENDBR32.
+        */
+#ifdef CONFIG_X86_KERNEL_IBT
+       if (cpu_feature_enabled(X86_FEATURE_IBT) &&
+           *(u32 *)hv_hypercall_pg != gen_endbr()) {
+               setup_clear_cpu_cap(X86_FEATURE_IBT);
+               pr_warn("Hyper-V: Disabling IBT because of Hyper-V bug\n");
+       }
+#endif
+
+       /*
         * hyperv_init() is called before LAPIC is initialized: see
         * apic_intr_mode_init() -> x86_platform.apic_post_init() and
         * apic_bsp_setup() -> setup_local_APIC(). The direct-mode STIMER
index 85d38b9..db5d2ea 100644 (file)
@@ -25,6 +25,10 @@ void __init hv_vtl_init_platform(void)
        x86_init.irqs.pre_vector_init = x86_init_noop;
        x86_init.timers.timer_init = x86_init_noop;
 
+       /* Avoid searching for BIOS MP tables */
+       x86_init.mpparse.find_smp_config = x86_init_noop;
+       x86_init.mpparse.get_smp_config = x86_init_uint_noop;
+
        x86_platform.get_wallclock = get_rtc_noop;
        x86_platform.set_wallclock = set_rtc_noop;
        x86_platform.get_nmi_reason = hv_get_nmi_reason;
index 14f46ad..28be6df 100644 (file)
@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(hv_ghcb_msr_read);
 static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
                           enum hv_mem_host_visibility visibility)
 {
-       struct hv_gpa_range_for_visibility **input_pcpu, *input;
+       struct hv_gpa_range_for_visibility *input;
        u16 pages_processed;
        u64 hv_status;
        unsigned long flags;
@@ -263,9 +263,8 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
        }
 
        local_irq_save(flags);
-       input_pcpu = (struct hv_gpa_range_for_visibility **)
-                       this_cpu_ptr(hyperv_pcpu_input_arg);
-       input = *input_pcpu;
+       input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
        if (unlikely(!input)) {
                local_irq_restore(flags);
                return -EINVAL;
index 8460bd3..1cc1132 100644 (file)
@@ -61,7 +61,6 @@ static void hyperv_flush_tlb_multi(const struct cpumask *cpus,
                                   const struct flush_tlb_info *info)
 {
        int cpu, vcpu, gva_n, max_gvas;
-       struct hv_tlb_flush **flush_pcpu;
        struct hv_tlb_flush *flush;
        u64 status;
        unsigned long flags;
@@ -74,10 +73,7 @@ static void hyperv_flush_tlb_multi(const struct cpumask *cpus,
 
        local_irq_save(flags);
 
-       flush_pcpu = (struct hv_tlb_flush **)
-                    this_cpu_ptr(hyperv_pcpu_input_arg);
-
-       flush = *flush_pcpu;
+       flush = *this_cpu_ptr(hyperv_pcpu_input_arg);
 
        if (unlikely(!flush)) {
                local_irq_restore(flags);
@@ -178,17 +174,13 @@ static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
                                      const struct flush_tlb_info *info)
 {
        int nr_bank = 0, max_gvas, gva_n;
-       struct hv_tlb_flush_ex **flush_pcpu;
        struct hv_tlb_flush_ex *flush;
        u64 status;
 
        if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
                return HV_STATUS_INVALID_PARAMETER;
 
-       flush_pcpu = (struct hv_tlb_flush_ex **)
-                    this_cpu_ptr(hyperv_pcpu_input_arg);
-
-       flush = *flush_pcpu;
+       flush = *this_cpu_ptr(hyperv_pcpu_input_arg);
 
        if (info->mm) {
                /*
index 5d70968..9dc259f 100644 (file)
@@ -19,7 +19,6 @@
 
 int hyperv_flush_guest_mapping(u64 as)
 {
-       struct hv_guest_mapping_flush **flush_pcpu;
        struct hv_guest_mapping_flush *flush;
        u64 status;
        unsigned long flags;
@@ -30,10 +29,7 @@ int hyperv_flush_guest_mapping(u64 as)
 
        local_irq_save(flags);
 
-       flush_pcpu = (struct hv_guest_mapping_flush **)
-               this_cpu_ptr(hyperv_pcpu_input_arg);
-
-       flush = *flush_pcpu;
+       flush = *this_cpu_ptr(hyperv_pcpu_input_arg);
 
        if (unlikely(!flush)) {
                local_irq_restore(flags);
@@ -90,7 +86,6 @@ EXPORT_SYMBOL_GPL(hyperv_fill_flush_guest_mapping_list);
 int hyperv_flush_guest_mapping_range(u64 as,
                hyperv_fill_flush_list_func fill_flush_list_func, void *data)
 {
-       struct hv_guest_mapping_flush_list **flush_pcpu;
        struct hv_guest_mapping_flush_list *flush;
        u64 status;
        unsigned long flags;
@@ -102,10 +97,8 @@ int hyperv_flush_guest_mapping_range(u64 as,
 
        local_irq_save(flags);
 
-       flush_pcpu = (struct hv_guest_mapping_flush_list **)
-               this_cpu_ptr(hyperv_pcpu_input_arg);
+       flush = *this_cpu_ptr(hyperv_pcpu_input_arg);
 
-       flush = *flush_pcpu;
        if (unlikely(!flush)) {
                local_irq_restore(flags);
                goto fault;
index 88d9ef9..fa83d88 100644 (file)
@@ -5,7 +5,7 @@
 #include <linux/types.h>
 #include <linux/nmi.h>
 #include <linux/msi.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/nospec-branch.h>
 #include <asm/paravirt.h>
index 24afcc9..2328cc0 100644 (file)
@@ -3675,7 +3675,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
        ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
                            RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
                            RBD_LOCK_TAG, "", 0);
-       if (ret)
+       if (ret && ret != -EEXIST)
                return ret;
 
        __rbd_lock(rbd_dev, cookie);
@@ -3878,7 +3878,7 @@ static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
                                 &rbd_dev->header_oloc, RBD_LOCK_NAME,
                                 &lock_type, &lock_tag, &lockers, &num_lockers);
        if (ret) {
-               rbd_warn(rbd_dev, "failed to retrieve lockers: %d", ret);
+               rbd_warn(rbd_dev, "failed to get header lockers: %d", ret);
                return ERR_PTR(ret);
        }
 
@@ -3940,8 +3940,10 @@ static int find_watcher(struct rbd_device *rbd_dev,
        ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
                                      &rbd_dev->header_oloc, &watchers,
                                      &num_watchers);
-       if (ret)
+       if (ret) {
+               rbd_warn(rbd_dev, "failed to get watchers: %d", ret);
                return ret;
+       }
 
        sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
        for (i = 0; i < num_watchers; i++) {
@@ -3985,8 +3987,12 @@ static int rbd_try_lock(struct rbd_device *rbd_dev)
                locker = refreshed_locker = NULL;
 
                ret = rbd_lock(rbd_dev);
-               if (ret != -EBUSY)
+               if (!ret)
+                       goto out;
+               if (ret != -EBUSY) {
+                       rbd_warn(rbd_dev, "failed to lock header: %d", ret);
                        goto out;
+               }
 
                /* determine if the current lock holder is still alive */
                locker = get_lock_owner_info(rbd_dev);
@@ -4089,11 +4095,8 @@ static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
 
        ret = rbd_try_lock(rbd_dev);
        if (ret < 0) {
-               rbd_warn(rbd_dev, "failed to lock header: %d", ret);
-               if (ret == -EBLOCKLISTED)
-                       goto out;
-
-               ret = 1; /* request lock anyway */
+               rbd_warn(rbd_dev, "failed to acquire lock: %d", ret);
+               goto out;
        }
        if (ret > 0) {
                up_write(&rbd_dev->lock_rwsem);
@@ -6627,12 +6630,11 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
                cancel_delayed_work_sync(&rbd_dev->lock_dwork);
                if (!ret)
                        ret = -ETIMEDOUT;
-       }
 
-       if (ret) {
-               rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
-               return ret;
+               rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret);
        }
+       if (ret)
+               return ret;
 
        /*
         * The lock may have been released by now, unless automatic lock
index 93f38a8..6b3b424 100644 (file)
@@ -444,6 +444,7 @@ config COMMON_CLK_BD718XX
 config COMMON_CLK_FIXED_MMIO
        bool "Clock driver for Memory Mapped Fixed values"
        depends on COMMON_CLK && OF
+       depends on HAS_IOMEM
        help
          Support for Memory Mapped IO Fixed clocks
 
index b6c7c27..44f4351 100644 (file)
@@ -291,7 +291,7 @@ static int imx93_clocks_probe(struct platform_device *pdev)
        anatop_base = devm_of_iomap(dev, np, 0, NULL);
        of_node_put(np);
        if (WARN_ON(IS_ERR(anatop_base))) {
-               ret = PTR_ERR(base);
+               ret = PTR_ERR(anatop_base);
                goto unregister_hws;
        }
 
index 1ba421b..e31f943 100644 (file)
@@ -328,6 +328,14 @@ static const char * const atb_parents[] = {
        "syspll_d5"
 };
 
+static const char * const sspm_parents[] = {
+       "clk26m",
+       "univpll_d2_d4",
+       "syspll_d2_d2",
+       "univpll_d2_d2",
+       "syspll_d3"
+};
+
 static const char * const dpi0_parents[] = {
        "clk26m",
        "tvdpll_d2",
@@ -507,6 +515,9 @@ static const struct mtk_mux top_muxes[] = {
        /* CLK_CFG_6 */
        MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_ATB, "atb_sel",
                atb_parents, 0xa0, 0xa4, 0xa8, 0, 2, 7, 0x004, 24),
+       MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_MUX_SSPM, "sspm_sel",
+                                  sspm_parents, 0xa0, 0xa4, 0xa8, 8, 3, 15, 0x004, 25,
+                                  CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
        MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DPI0, "dpi0_sel",
                dpi0_parents, 0xa0, 0xa4, 0xa8, 16, 4, 23, 0x004, 26),
        MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SCAM, "scam_sel",
@@ -673,10 +684,18 @@ static const struct mtk_gate_regs infra3_cg_regs = {
        GATE_MTK(_id, _name, _parent, &infra2_cg_regs, _shift,  \
                &mtk_clk_gate_ops_setclr)
 
+#define GATE_INFRA2_FLAGS(_id, _name, _parent, _shift, _flag)  \
+       GATE_MTK_FLAGS(_id, _name, _parent, &infra2_cg_regs,    \
+                      _shift, &mtk_clk_gate_ops_setclr, _flag)
+
 #define GATE_INFRA3(_id, _name, _parent, _shift)               \
        GATE_MTK(_id, _name, _parent, &infra3_cg_regs, _shift,  \
                &mtk_clk_gate_ops_setclr)
 
+#define GATE_INFRA3_FLAGS(_id, _name, _parent, _shift, _flag)  \
+       GATE_MTK_FLAGS(_id, _name, _parent, &infra3_cg_regs,    \
+                      _shift, &mtk_clk_gate_ops_setclr, _flag)
+
 static const struct mtk_gate infra_clks[] = {
        /* INFRA0 */
        GATE_INFRA0(CLK_INFRA_PMIC_TMR, "infra_pmic_tmr", "axi_sel", 0),
@@ -748,7 +767,11 @@ static const struct mtk_gate infra_clks[] = {
        GATE_INFRA2(CLK_INFRA_UNIPRO_TICK, "infra_unipro_tick", "fufs_sel", 12),
        GATE_INFRA2(CLK_INFRA_UFS_MP_SAP_BCLK, "infra_ufs_mp_sap_bck", "fufs_sel", 13),
        GATE_INFRA2(CLK_INFRA_MD32_BCLK, "infra_md32_bclk", "axi_sel", 14),
+       /* infra_sspm is main clock in co-processor, should not be closed in Linux. */
+       GATE_INFRA2_FLAGS(CLK_INFRA_SSPM, "infra_sspm", "sspm_sel", 15, CLK_IS_CRITICAL),
        GATE_INFRA2(CLK_INFRA_UNIPRO_MBIST, "infra_unipro_mbist", "axi_sel", 16),
+       /* infra_sspm_bus_hclk is main clock in co-processor, should not be closed in Linux. */
+       GATE_INFRA2_FLAGS(CLK_INFRA_SSPM_BUS_HCLK, "infra_sspm_bus_hclk", "axi_sel", 17, CLK_IS_CRITICAL),
        GATE_INFRA2(CLK_INFRA_I2C5, "infra_i2c5", "i2c_sel", 18),
        GATE_INFRA2(CLK_INFRA_I2C5_ARBITER, "infra_i2c5_arbiter", "i2c_sel", 19),
        GATE_INFRA2(CLK_INFRA_I2C5_IMM, "infra_i2c5_imm", "i2c_sel", 20),
@@ -766,6 +789,10 @@ static const struct mtk_gate infra_clks[] = {
        GATE_INFRA3(CLK_INFRA_MSDC0_SELF, "infra_msdc0_self", "msdc50_0_sel", 0),
        GATE_INFRA3(CLK_INFRA_MSDC1_SELF, "infra_msdc1_self", "msdc50_0_sel", 1),
        GATE_INFRA3(CLK_INFRA_MSDC2_SELF, "infra_msdc2_self", "msdc50_0_sel", 2),
+       /* infra_sspm_26m_self is main clock in co-processor, should not be closed in Linux. */
+       GATE_INFRA3_FLAGS(CLK_INFRA_SSPM_26M_SELF, "infra_sspm_26m_self", "f_f26m_ck", 3, CLK_IS_CRITICAL),
+       /* infra_sspm_32k_self is main clock in co-processor, should not be closed in Linux. */
+       GATE_INFRA3_FLAGS(CLK_INFRA_SSPM_32K_SELF, "infra_sspm_32k_self", "f_f26m_ck", 4, CLK_IS_CRITICAL),
        GATE_INFRA3(CLK_INFRA_UFS_AXI, "infra_ufs_axi", "axi_sel", 5),
        GATE_INFRA3(CLK_INFRA_I2C6, "infra_i2c6", "i2c_sel", 6),
        GATE_INFRA3(CLK_INFRA_AP_MSDC0, "infra_ap_msdc0", "msdc50_hclk_sel", 7),
index 8fef90b..6fa7639 100644 (file)
@@ -367,9 +367,9 @@ static int meson_clk_pll_enable(struct clk_hw *hw)
         * 3. enable the lock detect module
         */
        if (MESON_PARM_APPLICABLE(&pll->current_en)) {
-               usleep_range(10, 20);
+               udelay(10);
                meson_parm_write(clk->map, &pll->current_en, 1);
-               usleep_range(40, 50);
+               udelay(40);
        }
 
        if (MESON_PARM_APPLICABLE(&pll->l_detect)) {
index d6d067f..ca60bb8 100644 (file)
@@ -121,6 +121,45 @@ static bool cxl_is_security_command(u16 opcode)
        return false;
 }
 
+static void cxl_set_security_cmd_enabled(struct cxl_security_state *security,
+                                        u16 opcode)
+{
+       switch (opcode) {
+       case CXL_MBOX_OP_SANITIZE:
+               set_bit(CXL_SEC_ENABLED_SANITIZE, security->enabled_cmds);
+               break;
+       case CXL_MBOX_OP_SECURE_ERASE:
+               set_bit(CXL_SEC_ENABLED_SECURE_ERASE,
+                       security->enabled_cmds);
+               break;
+       case CXL_MBOX_OP_GET_SECURITY_STATE:
+               set_bit(CXL_SEC_ENABLED_GET_SECURITY_STATE,
+                       security->enabled_cmds);
+               break;
+       case CXL_MBOX_OP_SET_PASSPHRASE:
+               set_bit(CXL_SEC_ENABLED_SET_PASSPHRASE,
+                       security->enabled_cmds);
+               break;
+       case CXL_MBOX_OP_DISABLE_PASSPHRASE:
+               set_bit(CXL_SEC_ENABLED_DISABLE_PASSPHRASE,
+                       security->enabled_cmds);
+               break;
+       case CXL_MBOX_OP_UNLOCK:
+               set_bit(CXL_SEC_ENABLED_UNLOCK, security->enabled_cmds);
+               break;
+       case CXL_MBOX_OP_FREEZE_SECURITY:
+               set_bit(CXL_SEC_ENABLED_FREEZE_SECURITY,
+                       security->enabled_cmds);
+               break;
+       case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
+               set_bit(CXL_SEC_ENABLED_PASSPHRASE_SECURE_ERASE,
+                       security->enabled_cmds);
+               break;
+       default:
+               break;
+       }
+}
+
 static bool cxl_is_poison_command(u16 opcode)
 {
 #define CXL_MBOX_OP_POISON_CMDS 0x43
@@ -677,7 +716,8 @@ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
                u16 opcode = le16_to_cpu(cel_entry[i].opcode);
                struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
 
-               if (!cmd && !cxl_is_poison_command(opcode)) {
+               if (!cmd && (!cxl_is_poison_command(opcode) ||
+                            !cxl_is_security_command(opcode))) {
                        dev_dbg(dev,
                                "Opcode 0x%04x unsupported by driver\n", opcode);
                        continue;
@@ -689,6 +729,9 @@ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
                if (cxl_is_poison_command(opcode))
                        cxl_set_poison_cmd_enabled(&mds->poison, opcode);
 
+               if (cxl_is_security_command(opcode))
+                       cxl_set_security_cmd_enabled(&mds->security, opcode);
+
                dev_dbg(dev, "Opcode 0x%04x enabled\n", opcode);
        }
 }
index f99e7ec..14b547c 100644 (file)
@@ -477,9 +477,28 @@ static struct attribute_group cxl_memdev_pmem_attribute_group = {
        .attrs = cxl_memdev_pmem_attributes,
 };
 
+static umode_t cxl_memdev_security_visible(struct kobject *kobj,
+                                          struct attribute *a, int n)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+       if (a == &dev_attr_security_sanitize.attr &&
+           !test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds))
+               return 0;
+
+       if (a == &dev_attr_security_erase.attr &&
+           !test_bit(CXL_SEC_ENABLED_SECURE_ERASE, mds->security.enabled_cmds))
+               return 0;
+
+       return a->mode;
+}
+
 static struct attribute_group cxl_memdev_security_attribute_group = {
        .name = "security",
        .attrs = cxl_memdev_security_attributes,
+       .is_visible = cxl_memdev_security_visible,
 };
 
 static const struct attribute_group *cxl_memdev_attribute_groups[] = {
index 4991133..706f8a6 100644 (file)
@@ -244,6 +244,19 @@ enum poison_cmd_enabled_bits {
        CXL_POISON_ENABLED_MAX
 };
 
+/* Device enabled security commands */
+enum security_cmd_enabled_bits {
+       CXL_SEC_ENABLED_SANITIZE,
+       CXL_SEC_ENABLED_SECURE_ERASE,
+       CXL_SEC_ENABLED_GET_SECURITY_STATE,
+       CXL_SEC_ENABLED_SET_PASSPHRASE,
+       CXL_SEC_ENABLED_DISABLE_PASSPHRASE,
+       CXL_SEC_ENABLED_UNLOCK,
+       CXL_SEC_ENABLED_FREEZE_SECURITY,
+       CXL_SEC_ENABLED_PASSPHRASE_SECURE_ERASE,
+       CXL_SEC_ENABLED_MAX
+};
+
 /**
  * struct cxl_poison_state - Driver poison state info
  *
@@ -346,6 +359,7 @@ struct cxl_fw_state {
  * struct cxl_security_state - Device security state
  *
  * @state: state of last security operation
+ * @enabled_cmds: All security commands enabled in the CEL
  * @poll: polling for sanitization is enabled, device has no mbox irq support
  * @poll_tmo_secs: polling timeout
  * @poll_dwork: polling work item
@@ -353,6 +367,7 @@ struct cxl_fw_state {
  */
 struct cxl_security_state {
        unsigned long state;
+       DECLARE_BITMAP(enabled_cmds, CXL_SEC_ENABLED_MAX);
        bool poll;
        int poll_tmo_secs;
        struct delayed_work poll_dwork;
@@ -434,6 +449,7 @@ struct cxl_dev_state {
  * @next_persistent_bytes: persistent capacity change pending device reset
  * @event: event log driver state
  * @poison: poison driver state info
+ * @security: security driver state info
  * @fw: firmware upload / activation state
  * @mbox_send: @dev specific transport for transmitting mailbox commands
  *
index 1efa5e9..19246ed 100644 (file)
@@ -166,8 +166,10 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
                return -ENOMEM;
 
        shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
-       if (!of_device_is_compatible(shmem, "arm,scmi-shmem"))
+       if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) {
+               of_node_put(shmem);
                return -ENXIO;
+       }
 
        ret = of_address_to_resource(shmem, 0, &res);
        of_node_put(shmem);
index 6971dcf..0493aa3 100644 (file)
@@ -818,10 +818,13 @@ static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp,
         * before sending it with a single RAW xfer.
         */
        if (rd->tx_size < rd->tx_req_size) {
-               size_t cnt;
+               ssize_t cnt;
 
                cnt = simple_write_to_buffer(rd->tx.buf, rd->tx.len, ppos,
                                             buf, count);
+               if (cnt < 0)
+                       return cnt;
+
                rd->tx_size += cnt;
                if (cnt < count)
                        return cnt;
index 621c37e..c193516 100644 (file)
@@ -40,6 +40,7 @@
 /**
  * struct scmi_smc - Structure representing a SCMI smc transport
  *
+ * @irq: An optional IRQ for completion
  * @cinfo: SCMI channel info
  * @shmem: Transmit/Receive shared memory area
  * @shmem_lock: Lock to protect access to Tx/Rx shared memory area.
@@ -52,6 +53,7 @@
  */
 
 struct scmi_smc {
+       int irq;
        struct scmi_chan_info *cinfo;
        struct scmi_shared_mem __iomem *shmem;
        /* Protect access to shmem area */
@@ -127,7 +129,7 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
        struct resource res;
        struct device_node *np;
        u32 func_id;
-       int ret, irq;
+       int ret;
 
        if (!tx)
                return -ENODEV;
@@ -137,8 +139,10 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
                return -ENOMEM;
 
        np = of_parse_phandle(cdev->of_node, "shmem", 0);
-       if (!of_device_is_compatible(np, "arm,scmi-shmem"))
+       if (!of_device_is_compatible(np, "arm,scmi-shmem")) {
+               of_node_put(np);
                return -ENXIO;
+       }
 
        ret = of_address_to_resource(np, 0, &res);
        of_node_put(np);
@@ -167,11 +171,10 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
         * completion of a message is signaled by an interrupt rather than by
         * the return of the SMC call.
         */
-       irq = of_irq_get_byname(cdev->of_node, "a2p");
-       if (irq > 0) {
-               ret = devm_request_irq(dev, irq, smc_msg_done_isr,
-                                      IRQF_NO_SUSPEND,
-                                      dev_name(dev), scmi_info);
+       scmi_info->irq = of_irq_get_byname(cdev->of_node, "a2p");
+       if (scmi_info->irq > 0) {
+               ret = request_irq(scmi_info->irq, smc_msg_done_isr,
+                                 IRQF_NO_SUSPEND, dev_name(dev), scmi_info);
                if (ret) {
                        dev_err(dev, "failed to setup SCMI smc irq\n");
                        return ret;
@@ -193,6 +196,10 @@ static int smc_chan_free(int id, void *p, void *data)
        struct scmi_chan_info *cinfo = p;
        struct scmi_smc *scmi_info = cinfo->transport_info;
 
+       /* Ignore any possible further reception on the IRQ path */
+       if (scmi_info->irq > 0)
+               free_irq(scmi_info->irq, scmi_info);
+
        cinfo->transport_info = NULL;
        scmi_info->cinfo = NULL;
 
index 890eb45..1990263 100644 (file)
@@ -34,7 +34,6 @@ static struct soc_device_attribute *soc_dev_attr;
 
 static int __init smccc_soc_init(void)
 {
-       struct arm_smccc_res res;
        int soc_id_rev, soc_id_version;
        static char soc_id_str[20], soc_id_rev_str[12];
        static char soc_id_jep106_id_str[12];
@@ -49,13 +48,13 @@ static int __init smccc_soc_init(void)
        }
 
        if (soc_id_version < 0) {
-               pr_err("ARCH_SOC_ID(0) returned error: %lx\n", res.a0);
+               pr_err("Invalid SoC Version: %x\n", soc_id_version);
                return -EINVAL;
        }
 
        soc_id_rev = arm_smccc_get_soc_id_revision();
        if (soc_id_rev < 0) {
-               pr_err("ARCH_SOC_ID(1) returned error: %lx\n", res.a0);
+               pr_err("Invalid SoC Revision: %x\n", soc_id_rev);
                return -EINVAL;
        }
 
index 23857cc..2702ad4 100644 (file)
@@ -165,14 +165,60 @@ static u32 preparser_disable(bool state)
        return MI_ARB_CHECK | 1 << 8 | state;
 }
 
-u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg)
+static i915_reg_t gen12_get_aux_inv_reg(struct intel_engine_cs *engine)
 {
-       u32 gsi_offset = gt->uncore->gsi_offset;
+       switch (engine->id) {
+       case RCS0:
+               return GEN12_CCS_AUX_INV;
+       case BCS0:
+               return GEN12_BCS0_AUX_INV;
+       case VCS0:
+               return GEN12_VD0_AUX_INV;
+       case VCS2:
+               return GEN12_VD2_AUX_INV;
+       case VECS0:
+               return GEN12_VE0_AUX_INV;
+       case CCS0:
+               return GEN12_CCS0_AUX_INV;
+       default:
+               return INVALID_MMIO_REG;
+       }
+}
+
+static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine)
+{
+       i915_reg_t reg = gen12_get_aux_inv_reg(engine);
+
+       if (IS_PONTEVECCHIO(engine->i915))
+               return false;
+
+       /*
+        * So far platforms supported by i915 having flat ccs do not require
+        * AUX invalidation. Check also whether the engine requires it.
+        */
+       return i915_mmio_reg_valid(reg) && !HAS_FLAT_CCS(engine->i915);
+}
+
+u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs)
+{
+       i915_reg_t inv_reg = gen12_get_aux_inv_reg(engine);
+       u32 gsi_offset = engine->gt->uncore->gsi_offset;
+
+       if (!gen12_needs_ccs_aux_inv(engine))
+               return cs;
 
        *cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
        *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
        *cs++ = AUX_INV;
-       *cs++ = MI_NOOP;
+
+       *cs++ = MI_SEMAPHORE_WAIT_TOKEN |
+               MI_SEMAPHORE_REGISTER_POLL |
+               MI_SEMAPHORE_POLL |
+               MI_SEMAPHORE_SAD_EQ_SDD;
+       *cs++ = 0;
+       *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
+       *cs++ = 0;
+       *cs++ = 0;
 
        return cs;
 }
@@ -202,8 +248,13 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
 {
        struct intel_engine_cs *engine = rq->engine;
 
-       if (mode & EMIT_FLUSH) {
-               u32 flags = 0;
+       /*
+        * On Aux CCS platforms the invalidation of the Aux
+        * table requires quiescing memory traffic beforehand
+        */
+       if (mode & EMIT_FLUSH || gen12_needs_ccs_aux_inv(engine)) {
+               u32 bit_group_0 = 0;
+               u32 bit_group_1 = 0;
                int err;
                u32 *cs;
 
@@ -211,32 +262,40 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
                if (err)
                        return err;
 
-               flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
-               flags |= PIPE_CONTROL_FLUSH_L3;
-               flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
-               flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+               bit_group_0 |= PIPE_CONTROL0_HDC_PIPELINE_FLUSH;
+
+               /*
+                * When required, in MTL and beyond platforms we
+                * need to set the CCS_FLUSH bit in the pipe control
+                */
+               if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70))
+                       bit_group_0 |= PIPE_CONTROL_CCS_FLUSH;
+
+               bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+               bit_group_1 |= PIPE_CONTROL_FLUSH_L3;
+               bit_group_1 |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+               bit_group_1 |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
                /* Wa_1409600907:tgl,adl-p */
-               flags |= PIPE_CONTROL_DEPTH_STALL;
-               flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
-               flags |= PIPE_CONTROL_FLUSH_ENABLE;
+               bit_group_1 |= PIPE_CONTROL_DEPTH_STALL;
+               bit_group_1 |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+               bit_group_1 |= PIPE_CONTROL_FLUSH_ENABLE;
 
-               flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-               flags |= PIPE_CONTROL_QW_WRITE;
+               bit_group_1 |= PIPE_CONTROL_STORE_DATA_INDEX;
+               bit_group_1 |= PIPE_CONTROL_QW_WRITE;
 
-               flags |= PIPE_CONTROL_CS_STALL;
+               bit_group_1 |= PIPE_CONTROL_CS_STALL;
 
                if (!HAS_3D_PIPELINE(engine->i915))
-                       flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
+                       bit_group_1 &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
                else if (engine->class == COMPUTE_CLASS)
-                       flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
+                       bit_group_1 &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
 
                cs = intel_ring_begin(rq, 6);
                if (IS_ERR(cs))
                        return PTR_ERR(cs);
 
-               cs = gen12_emit_pipe_control(cs,
-                                            PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
-                                            flags, LRC_PPHWSP_SCRATCH_ADDR);
+               cs = gen12_emit_pipe_control(cs, bit_group_0, bit_group_1,
+                                            LRC_PPHWSP_SCRATCH_ADDR);
                intel_ring_advance(rq, cs);
        }
 
@@ -267,10 +326,9 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
                else if (engine->class == COMPUTE_CLASS)
                        flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
 
-               if (!HAS_FLAT_CCS(rq->engine->i915))
-                       count = 8 + 4;
-               else
-                       count = 8;
+               count = 8;
+               if (gen12_needs_ccs_aux_inv(rq->engine))
+                       count += 8;
 
                cs = intel_ring_begin(rq, count);
                if (IS_ERR(cs))
@@ -285,11 +343,7 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
 
                cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
 
-               if (!HAS_FLAT_CCS(rq->engine->i915)) {
-                       /* hsdes: 1809175790 */
-                       cs = gen12_emit_aux_table_inv(rq->engine->gt,
-                                                     cs, GEN12_GFX_CCS_AUX_NV);
-               }
+               cs = gen12_emit_aux_table_inv(engine, cs);
 
                *cs++ = preparser_disable(false);
                intel_ring_advance(rq, cs);
@@ -300,21 +354,14 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
 
 int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
 {
-       intel_engine_mask_t aux_inv = 0;
-       u32 cmd, *cs;
+       u32 cmd = 4;
+       u32 *cs;
 
-       cmd = 4;
        if (mode & EMIT_INVALIDATE) {
                cmd += 2;
 
-               if (!HAS_FLAT_CCS(rq->engine->i915) &&
-                   (rq->engine->class == VIDEO_DECODE_CLASS ||
-                    rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) {
-                       aux_inv = rq->engine->mask &
-                               ~GENMASK(_BCS(I915_MAX_BCS - 1), BCS0);
-                       if (aux_inv)
-                               cmd += 4;
-               }
+               if (gen12_needs_ccs_aux_inv(rq->engine))
+                       cmd += 8;
        }
 
        cs = intel_ring_begin(rq, cmd);
@@ -338,6 +385,10 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
                cmd |= MI_INVALIDATE_TLB;
                if (rq->engine->class == VIDEO_DECODE_CLASS)
                        cmd |= MI_INVALIDATE_BSD;
+
+               if (gen12_needs_ccs_aux_inv(rq->engine) &&
+                   rq->engine->class == COPY_ENGINE_CLASS)
+                       cmd |= MI_FLUSH_DW_CCS;
        }
 
        *cs++ = cmd;
@@ -345,14 +396,7 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
        *cs++ = 0; /* upper addr */
        *cs++ = 0; /* value */
 
-       if (aux_inv) { /* hsdes: 1809175790 */
-               if (rq->engine->class == VIDEO_DECODE_CLASS)
-                       cs = gen12_emit_aux_table_inv(rq->engine->gt,
-                                                     cs, GEN12_VD0_AUX_NV);
-               else
-                       cs = gen12_emit_aux_table_inv(rq->engine->gt,
-                                                     cs, GEN12_VE0_AUX_NV);
-       }
+       cs = gen12_emit_aux_table_inv(rq->engine, cs);
 
        if (mode & EMIT_INVALIDATE)
                *cs++ = preparser_disable(false);
index 655e5c0..867ba69 100644 (file)
@@ -13,6 +13,7 @@
 #include "intel_gt_regs.h"
 #include "intel_gpu_commands.h"
 
+struct intel_engine_cs;
 struct intel_gt;
 struct i915_request;
 
@@ -46,28 +47,32 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
 u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
 u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
 
-u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg);
+u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs);
 
 static inline u32 *
-__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+__gen8_emit_pipe_control(u32 *batch, u32 bit_group_0,
+                        u32 bit_group_1, u32 offset)
 {
        memset(batch, 0, 6 * sizeof(u32));
 
-       batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
-       batch[1] = flags1;
+       batch[0] = GFX_OP_PIPE_CONTROL(6) | bit_group_0;
+       batch[1] = bit_group_1;
        batch[2] = offset;
 
        return batch + 6;
 }
 
-static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
+static inline u32 *gen8_emit_pipe_control(u32 *batch,
+                                         u32 bit_group_1, u32 offset)
 {
-       return __gen8_emit_pipe_control(batch, 0, flags, offset);
+       return __gen8_emit_pipe_control(batch, 0, bit_group_1, offset);
 }
 
-static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 bit_group_0,
+                                          u32 bit_group_1, u32 offset)
 {
-       return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
+       return __gen8_emit_pipe_control(batch, bit_group_0,
+                                       bit_group_1, offset);
 }
 
 static inline u32 *
index 5d143e2..2bd8d98 100644 (file)
 #define   MI_SEMAPHORE_TARGET(engine)  ((engine)<<15)
 #define MI_SEMAPHORE_WAIT      MI_INSTR(0x1c, 2) /* GEN8+ */
 #define MI_SEMAPHORE_WAIT_TOKEN        MI_INSTR(0x1c, 3) /* GEN12+ */
+#define   MI_SEMAPHORE_REGISTER_POLL   (1 << 16)
 #define   MI_SEMAPHORE_POLL            (1 << 15)
 #define   MI_SEMAPHORE_SAD_GT_SDD      (0 << 12)
 #define   MI_SEMAPHORE_SAD_GTE_SDD     (1 << 12)
 #define   PIPE_CONTROL_QW_WRITE                                (1<<14)
 #define   PIPE_CONTROL_POST_SYNC_OP_MASK                (3<<14)
 #define   PIPE_CONTROL_DEPTH_STALL                     (1<<13)
+#define   PIPE_CONTROL_CCS_FLUSH                       (1<<13) /* MTL+ */
 #define   PIPE_CONTROL_WRITE_FLUSH                     (1<<12)
 #define   PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH       (1<<12) /* gen6+ */
 #define   PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE    (1<<11) /* MBZ on ILK */
index 718cb2c..2cdfb2f 100644 (file)
 #define GEN8_PRIVATE_PAT_HI                    _MMIO(0x40e0 + 4)
 #define GEN10_PAT_INDEX(index)                 _MMIO(0x40e0 + (index) * 4)
 #define BSD_HWS_PGA_GEN7                       _MMIO(0x4180)
-#define GEN12_GFX_CCS_AUX_NV                   _MMIO(0x4208)
-#define GEN12_VD0_AUX_NV                       _MMIO(0x4218)
-#define GEN12_VD1_AUX_NV                       _MMIO(0x4228)
+
+#define GEN12_CCS_AUX_INV                      _MMIO(0x4208)
+#define GEN12_VD0_AUX_INV                      _MMIO(0x4218)
+#define GEN12_VE0_AUX_INV                      _MMIO(0x4238)
+#define GEN12_BCS0_AUX_INV                     _MMIO(0x4248)
 
 #define GEN8_RTCR                              _MMIO(0x4260)
 #define GEN8_M1TCR                             _MMIO(0x4264)
 #define GEN8_BTCR                              _MMIO(0x426c)
 #define GEN8_VTCR                              _MMIO(0x4270)
 
-#define GEN12_VD2_AUX_NV                       _MMIO(0x4298)
-#define GEN12_VD3_AUX_NV                       _MMIO(0x42a8)
-#define GEN12_VE0_AUX_NV                       _MMIO(0x4238)
-
 #define BLT_HWS_PGA_GEN7                       _MMIO(0x4280)
 
-#define GEN12_VE1_AUX_NV                       _MMIO(0x42b8)
+#define GEN12_VD2_AUX_INV                      _MMIO(0x4298)
+#define GEN12_CCS0_AUX_INV                     _MMIO(0x42c8)
 #define   AUX_INV                              REG_BIT(0)
+
 #define VEBOX_HWS_PGA_GEN7                     _MMIO(0x4380)
 
 #define GEN12_AUX_ERR_DBG                      _MMIO(0x43f4)
index a4ec20a..9477c24 100644 (file)
@@ -1364,10 +1364,7 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
            IS_DG2_G11(ce->engine->i915))
                cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0);
 
-       /* hsdes: 1809175790 */
-       if (!HAS_FLAT_CCS(ce->engine->i915))
-               cs = gen12_emit_aux_table_inv(ce->engine->gt,
-                                             cs, GEN12_GFX_CCS_AUX_NV);
+       cs = gen12_emit_aux_table_inv(ce->engine, cs);
 
        /* Wa_16014892111 */
        if (IS_MTL_GRAPHICS_STEP(ce->engine->i915, M, STEP_A0, STEP_B0) ||
@@ -1392,17 +1389,7 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
                                                    PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE,
                                                    0);
 
-       /* hsdes: 1809175790 */
-       if (!HAS_FLAT_CCS(ce->engine->i915)) {
-               if (ce->engine->class == VIDEO_DECODE_CLASS)
-                       cs = gen12_emit_aux_table_inv(ce->engine->gt,
-                                                     cs, GEN12_VD0_AUX_NV);
-               else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS)
-                       cs = gen12_emit_aux_table_inv(ce->engine->gt,
-                                                     cs, GEN12_VE0_AUX_NV);
-       }
-
-       return cs;
+       return gen12_emit_aux_table_inv(ce->engine, cs);
 }
 
 static void
index 2a0438f..af9afdb 100644 (file)
@@ -491,7 +491,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
                return;
        }
 
-       msg_length = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, reg);
+       msg_length = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, value);
 
        // check the msg in DATA register.
        msg = vgpu_vreg(vgpu, offset + 4);
index 8ef9388..5ec2930 100644 (file)
@@ -449,8 +449,11 @@ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
                }
        } while (unlikely(is_barrier(active)));
 
-       if (!__i915_active_fence_set(active, fence))
+       fence = __i915_active_fence_set(active, fence);
+       if (!fence)
                __i915_active_acquire(ref);
+       else
+               dma_fence_put(fence);
 
 out:
        i915_active_release(ref);
@@ -469,13 +472,9 @@ __i915_active_set_fence(struct i915_active *ref,
                return NULL;
        }
 
-       rcu_read_lock();
        prev = __i915_active_fence_set(active, fence);
-       if (prev)
-               prev = dma_fence_get_rcu(prev);
-       else
+       if (!prev)
                __i915_active_acquire(ref);
-       rcu_read_unlock();
 
        return prev;
 }
@@ -1019,10 +1018,11 @@ void i915_request_add_active_barriers(struct i915_request *rq)
  *
  * Records the new @fence as the last active fence along its timeline in
  * this active tracker, moving the tracking callbacks from the previous
- * fence onto this one. Returns the previous fence (if not already completed),
- * which the caller must ensure is executed before the new fence. To ensure
- * that the order of fences within the timeline of the i915_active_fence is
- * understood, it should be locked by the caller.
+ * fence onto this one. Gets and returns a reference to the previous fence
+ * (if not already completed), which the caller must put after making sure
+ * that it is executed before the new fence. To ensure that the order of
+ * fences within the timeline of the i915_active_fence is understood, it
+ * should be locked by the caller.
  */
 struct dma_fence *
 __i915_active_fence_set(struct i915_active_fence *active,
@@ -1031,7 +1031,23 @@ __i915_active_fence_set(struct i915_active_fence *active,
        struct dma_fence *prev;
        unsigned long flags;
 
-       if (fence == rcu_access_pointer(active->fence))
+       /*
+        * In case of fences embedded in i915_requests, their memory is
+        * SLAB_FAILSAFE_BY_RCU, then it can be reused right after release
+        * by new requests.  Then, there is a risk of passing back a pointer
+        * to a new, completely unrelated fence that reuses the same memory
+        * while tracked under a different active tracker.  Combined with i915
+        * perf open/close operations that build await dependencies between
+        * engine kernel context requests and user requests from different
+        * timelines, this can lead to dependency loops and infinite waits.
+        *
+        * As a countermeasure, we try to get a reference to the active->fence
+        * first, so if we succeed and pass it back to our user then it is not
+        * released and potentially reused by an unrelated request before the
+        * user has a chance to set up an await dependency on it.
+        */
+       prev = i915_active_fence_get(active);
+       if (fence == prev)
                return fence;
 
        GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
@@ -1040,27 +1056,56 @@ __i915_active_fence_set(struct i915_active_fence *active,
         * Consider that we have two threads arriving (A and B), with
         * C already resident as the active->fence.
         *
-        * A does the xchg first, and so it sees C or NULL depending
-        * on the timing of the interrupt handler. If it is NULL, the
-        * previous fence must have been signaled and we know that
-        * we are first on the timeline. If it is still present,
-        * we acquire the lock on that fence and serialise with the interrupt
-        * handler, in the process removing it from any future interrupt
-        * callback. A will then wait on C before executing (if present).
-        *
-        * As B is second, it sees A as the previous fence and so waits for
-        * it to complete its transition and takes over the occupancy for
-        * itself -- remembering that it needs to wait on A before executing.
+        * Both A and B have got a reference to C or NULL, depending on the
+        * timing of the interrupt handler.  Let's assume that if A has got C
+        * then it has locked C first (before B).
         *
         * Note the strong ordering of the timeline also provides consistent
         * nesting rules for the fence->lock; the inner lock is always the
         * older lock.
         */
        spin_lock_irqsave(fence->lock, flags);
-       prev = xchg(__active_fence_slot(active), fence);
-       if (prev) {
-               GEM_BUG_ON(prev == fence);
+       if (prev)
                spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+
+       /*
+        * A does the cmpxchg first, and so it sees C or NULL, as before, or
+        * something else, depending on the timing of other threads and/or
+        * interrupt handler.  If not the same as before then A unlocks C if
+        * applicable and retries, starting from an attempt to get a new
+        * active->fence.  Meanwhile, B follows the same path as A.
+        * Once A succeeds with cmpxch, B fails again, retires, gets A from
+        * active->fence, locks it as soon as A completes, and possibly
+        * succeeds with cmpxchg.
+        */
+       while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
+               if (prev) {
+                       spin_unlock(prev->lock);
+                       dma_fence_put(prev);
+               }
+               spin_unlock_irqrestore(fence->lock, flags);
+
+               prev = i915_active_fence_get(active);
+               GEM_BUG_ON(prev == fence);
+
+               spin_lock_irqsave(fence->lock, flags);
+               if (prev)
+                       spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+       }
+
+       /*
+        * If prev is NULL then the previous fence must have been signaled
+        * and we know that we are first on the timeline.  If it is still
+        * present then, having the lock on that fence already acquired, we
+        * serialise with the interrupt handler, in the process of removing it
+        * from any future interrupt callback.  A will then wait on C before
+        * executing (if present).
+        *
+        * As B is second, it sees A as the previous fence and so waits for
+        * it to complete its transition and takes over the occupancy for
+        * itself -- remembering that it needs to wait on A before executing.
+        */
+       if (prev) {
                __list_del_entry(&active->cb.node);
                spin_unlock(prev->lock); /* serialise with prev->cb_list */
        }
@@ -1077,11 +1122,7 @@ int i915_active_fence_set(struct i915_active_fence *active,
        int err = 0;
 
        /* Must maintain timeline ordering wrt previous active requests */
-       rcu_read_lock();
        fence = __i915_active_fence_set(active, &rq->fence);
-       if (fence) /* but the previous fence may not belong to that timeline! */
-               fence = dma_fence_get_rcu(fence);
-       rcu_read_unlock();
        if (fence) {
                err = i915_request_await_dma_fence(rq, fence);
                dma_fence_put(fence);
index 894068b..833b73e 100644 (file)
@@ -1661,6 +1661,11 @@ __i915_request_ensure_parallel_ordering(struct i915_request *rq,
 
        request_to_parent(rq)->parallel.last_rq = i915_request_get(rq);
 
+       /*
+        * Users have to put a reference potentially got by
+        * __i915_active_fence_set() to the returned request
+        * when no longer needed
+        */
        return to_request(__i915_active_fence_set(&timeline->last_request,
                                                  &rq->fence));
 }
@@ -1707,6 +1712,10 @@ __i915_request_ensure_ordering(struct i915_request *rq,
                                                         0);
        }
 
+       /*
+        * Users have to put the reference to prev potentially got
+        * by __i915_active_fence_set() when no longer needed
+        */
        return prev;
 }
 
@@ -1760,6 +1769,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
                prev = __i915_request_ensure_ordering(rq, timeline);
        else
                prev = __i915_request_ensure_parallel_ordering(rq, timeline);
+       if (prev)
+               i915_request_put(prev);
 
        /*
         * Make sure that no request gazumped us - if it was allocated after
index 5f26090..89585b3 100644 (file)
@@ -310,7 +310,7 @@ static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
                dev_warn(ipu_crtc->dev, "8-pixel align hactive %d -> %d\n",
                         sig_cfg.mode.hactive, new_hactive);
 
-               sig_cfg.mode.hfront_porch = new_hactive - sig_cfg.mode.hactive;
+               sig_cfg.mode.hfront_porch -= new_hactive - sig_cfg.mode.hactive;
                sig_cfg.mode.hactive = new_hactive;
        }
 
index 102e1fc..be4ec5b 100644 (file)
@@ -569,6 +569,7 @@ static const struct of_device_id s6d7aa0_of_match[] = {
        },
        { /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, s6d7aa0_of_match);
 
 static struct mipi_dsi_driver s6d7aa0_driver = {
        .probe = s6d7aa0_probe,
index 7139a52..54e3083 100644 (file)
@@ -519,7 +519,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
 
        if (bo->pin_count) {
                *locked = false;
-               *busy = false;
+               if (busy)
+                       *busy = false;
                return false;
        }
 
index 5978e9d..ebf15f3 100644 (file)
@@ -209,8 +209,7 @@ int vmbus_connect(void)
         * Setup the vmbus event connection for channel interrupt
         * abstraction stuff
         */
-       vmbus_connection.int_page =
-       (void *)hv_alloc_hyperv_zeroed_page();
+       vmbus_connection.int_page = hv_alloc_hyperv_zeroed_page();
        if (vmbus_connection.int_page == NULL) {
                ret = -ENOMEM;
                goto cleanup;
@@ -225,8 +224,8 @@ int vmbus_connect(void)
         * Setup the monitor notification facility. The 1st page for
         * parent->child and the 2nd page for child->parent
         */
-       vmbus_connection.monitor_pages[0] = (void *)hv_alloc_hyperv_page();
-       vmbus_connection.monitor_pages[1] = (void *)hv_alloc_hyperv_page();
+       vmbus_connection.monitor_pages[0] = hv_alloc_hyperv_page();
+       vmbus_connection.monitor_pages[1] = hv_alloc_hyperv_page();
        if ((vmbus_connection.monitor_pages[0] == NULL) ||
            (vmbus_connection.monitor_pages[1] == NULL)) {
                ret = -ENOMEM;
@@ -333,15 +332,15 @@ void vmbus_disconnect(void)
                destroy_workqueue(vmbus_connection.work_queue);
 
        if (vmbus_connection.int_page) {
-               hv_free_hyperv_page((unsigned long)vmbus_connection.int_page);
+               hv_free_hyperv_page(vmbus_connection.int_page);
                vmbus_connection.int_page = NULL;
        }
 
        set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[0], 1);
        set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[1], 1);
 
-       hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[0]);
-       hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[1]);
+       hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
+       hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
        vmbus_connection.monitor_pages[0] = NULL;
        vmbus_connection.monitor_pages[1] = NULL;
 }
index dffcc89..0d7a3ba 100644 (file)
@@ -1628,7 +1628,7 @@ static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
        WARN_ON_ONCE(nents > HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES);
        WARN_ON_ONCE(sgl->length < (HV_HYP_PAGE_SIZE << page_reporting_order));
        local_irq_save(flags);
-       hint = *(struct hv_memory_hint **)this_cpu_ptr(hyperv_pcpu_input_arg);
+       hint = *this_cpu_ptr(hyperv_pcpu_input_arg);
        if (!hint) {
                local_irq_restore(flags);
                return -ENOSPC;
index 542a1d5..6a2258f 100644 (file)
@@ -115,12 +115,12 @@ void *hv_alloc_hyperv_zeroed_page(void)
 }
 EXPORT_SYMBOL_GPL(hv_alloc_hyperv_zeroed_page);
 
-void hv_free_hyperv_page(unsigned long addr)
+void hv_free_hyperv_page(void *addr)
 {
        if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
-               free_page(addr);
+               free_page((unsigned long)addr);
        else
-               kfree((void *)addr);
+               kfree(addr);
 }
 EXPORT_SYMBOL_GPL(hv_free_hyperv_page);
 
@@ -253,7 +253,7 @@ static void hv_kmsg_dump_unregister(void)
        atomic_notifier_chain_unregister(&panic_notifier_list,
                                         &hyperv_panic_report_block);
 
-       hv_free_hyperv_page((unsigned long)hv_panic_page);
+       hv_free_hyperv_page(hv_panic_page);
        hv_panic_page = NULL;
 }
 
@@ -270,7 +270,7 @@ static void hv_kmsg_dump_register(void)
        ret = kmsg_dump_register(&hv_kmsg_dumper);
        if (ret) {
                pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
-               hv_free_hyperv_page((unsigned long)hv_panic_page);
+               hv_free_hyperv_page(hv_panic_page);
                hv_panic_page = NULL;
        }
 }
index c0331b2..fe391de 100644 (file)
@@ -839,7 +839,7 @@ hfcpci_fill_fifo(struct bchannel *bch)
                *z1t = cpu_to_le16(new_z1);     /* now send data */
                if (bch->tx_idx < bch->tx_skb->len)
                        return;
-               dev_kfree_skb(bch->tx_skb);
+               dev_kfree_skb_any(bch->tx_skb);
                if (get_next_bframe(bch))
                        goto next_t_frame;
                return;
@@ -895,7 +895,7 @@ hfcpci_fill_fifo(struct bchannel *bch)
        }
        bz->za[new_f1].z1 = cpu_to_le16(new_z1);        /* for next buffer */
        bz->f1 = new_f1;        /* next frame */
-       dev_kfree_skb(bch->tx_skb);
+       dev_kfree_skb_any(bch->tx_skb);
        get_next_bframe(bch);
 }
 
@@ -1119,7 +1119,7 @@ tx_birq(struct bchannel *bch)
        if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len)
                hfcpci_fill_fifo(bch);
        else {
-               dev_kfree_skb(bch->tx_skb);
+               dev_kfree_skb_any(bch->tx_skb);
                if (get_next_bframe(bch))
                        hfcpci_fill_fifo(bch);
        }
@@ -2277,7 +2277,7 @@ _hfcpci_softirq(struct device *dev, void *unused)
                return 0;
 
        if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) {
-               spin_lock(&hc->lock);
+               spin_lock_irq(&hc->lock);
                bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
                if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */
                        main_rec_hfcpci(bch);
@@ -2288,7 +2288,7 @@ _hfcpci_softirq(struct device *dev, void *unused)
                        main_rec_hfcpci(bch);
                        tx_birq(bch);
                }
-               spin_unlock(&hc->lock);
+               spin_unlock_irq(&hc->lock);
        }
        return 0;
 }
index 4a750da..deb6e65 100644 (file)
@@ -755,6 +755,43 @@ const char *const tegra_mc_error_names[8] = {
        [6] = "SMMU translation error",
 };
 
+struct icc_node *tegra_mc_icc_xlate(struct of_phandle_args *spec, void *data)
+{
+       struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
+       struct icc_node *node;
+
+       list_for_each_entry(node, &mc->provider.nodes, node_list) {
+               if (node->id == spec->args[0])
+                       return node;
+       }
+
+       /*
+        * If a client driver calls devm_of_icc_get() before the MC driver
+        * is probed, then return EPROBE_DEFER to the client driver.
+        */
+       return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int tegra_mc_icc_get(struct icc_node *node, u32 *average, u32 *peak)
+{
+       *average = 0;
+       *peak = 0;
+
+       return 0;
+}
+
+static int tegra_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+       return 0;
+}
+
+const struct tegra_mc_icc_ops tegra_mc_icc_ops = {
+       .xlate = tegra_mc_icc_xlate,
+       .aggregate = icc_std_aggregate,
+       .get_bw = tegra_mc_icc_get,
+       .set = tegra_mc_icc_set,
+};
+
 /*
  * Memory Controller (MC) has few Memory Clients that are issuing memory
  * bandwidth allocation requests to the MC interconnect provider. The MC
index b2416ee..26035ac 100644 (file)
@@ -1355,6 +1355,7 @@ const struct tegra_mc_soc tegra194_mc_soc = {
                   MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
        .has_addr_hi_reg = true,
        .ops = &tegra186_mc_ops,
+       .icc_ops = &tegra_mc_icc_ops,
        .ch_intmask = 0x00000f00,
        .global_intstatus_channel_shift = 8,
 };
index 8e873a7..8fb83b3 100644 (file)
@@ -827,7 +827,7 @@ static int tegra234_mc_icc_set(struct icc_node *src, struct icc_node *dst)
                return 0;
 
        if (!mc->bwmgr_mrq_supported)
-               return -EINVAL;
+               return 0;
 
        if (!mc->bpmp) {
                dev_err(mc->dev, "BPMP reference NULL\n");
@@ -874,7 +874,7 @@ static int tegra234_mc_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
        struct tegra_mc *mc = icc_provider_to_tegra_mc(p);
 
        if (!mc->bwmgr_mrq_supported)
-               return -EINVAL;
+               return 0;
 
        if (node->id == TEGRA_ICC_MC_CPU_CLUSTER0 ||
            node->id == TEGRA_ICC_MC_CPU_CLUSTER1 ||
@@ -889,27 +889,6 @@ static int tegra234_mc_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
        return 0;
 }
 
-static struct icc_node*
-tegra234_mc_of_icc_xlate(struct of_phandle_args *spec, void *data)
-{
-       struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
-       unsigned int cl_id = spec->args[0];
-       struct icc_node *node;
-
-       list_for_each_entry(node, &mc->provider.nodes, node_list) {
-               if (node->id != cl_id)
-                       continue;
-
-               return node;
-       }
-
-       /*
-        * If a client driver calls devm_of_icc_get() before the MC driver
-        * is probed, then return EPROBE_DEFER to the client driver.
-        */
-       return ERR_PTR(-EPROBE_DEFER);
-}
-
 static int tegra234_mc_icc_get_init_bw(struct icc_node *node, u32 *avg, u32 *peak)
 {
        *avg = 0;
@@ -919,7 +898,7 @@ static int tegra234_mc_icc_get_init_bw(struct icc_node *node, u32 *avg, u32 *pea
 }
 
 static const struct tegra_mc_icc_ops tegra234_mc_icc_ops = {
-       .xlate = tegra234_mc_of_icc_xlate,
+       .xlate = tegra_mc_icc_xlate,
        .aggregate = tegra234_mc_icc_aggregate,
        .get_bw = tegra234_mc_icc_get_init_bw,
        .set = tegra234_mc_icc_set,
index 0864261..7366e85 100644 (file)
@@ -135,7 +135,7 @@ static int fun_exec_op(struct nand_chip *chip, const struct nand_operation *op,
        unsigned int i;
        int ret;
 
-       if (op->cs > NAND_MAX_CHIPS)
+       if (op->cs >= NAND_MAX_CHIPS)
                return -EINVAL;
 
        if (check_only)
index d3faf80..b10011d 100644 (file)
@@ -1278,7 +1278,6 @@ static int meson_nand_attach_chip(struct nand_chip *nand)
        struct meson_nfc *nfc = nand_get_controller_data(nand);
        struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
        struct mtd_info *mtd = nand_to_mtd(nand);
-       int nsectors = mtd->writesize / 1024;
        int raw_writesize;
        int ret;
 
@@ -1304,7 +1303,7 @@ static int meson_nand_attach_chip(struct nand_chip *nand)
        nand->options |= NAND_NO_SUBPAGE_WRITE;
 
        ret = nand_ecc_choose_conf(nand, nfc->data->ecc_caps,
-                                  mtd->oobsize - 2 * nsectors);
+                                  mtd->oobsize - 2);
        if (ret) {
                dev_err(nfc->dev, "failed to ECC init\n");
                return -EINVAL;
index 6e1eac6..4a97d4a 100644 (file)
@@ -177,17 +177,17 @@ static void elm_load_syndrome(struct elm_info *info,
                        switch (info->bch_type) {
                        case BCH8_ECC:
                                /* syndrome fragment 0 = ecc[9-12B] */
-                               val = cpu_to_be32(*(u32 *) &ecc[9]);
+                               val = (__force u32)cpu_to_be32(*(u32 *)&ecc[9]);
                                elm_write_reg(info, offset, val);
 
                                /* syndrome fragment 1 = ecc[5-8B] */
                                offset += 4;
-                               val = cpu_to_be32(*(u32 *) &ecc[5]);
+                               val = (__force u32)cpu_to_be32(*(u32 *)&ecc[5]);
                                elm_write_reg(info, offset, val);
 
                                /* syndrome fragment 2 = ecc[1-4B] */
                                offset += 4;
-                               val = cpu_to_be32(*(u32 *) &ecc[1]);
+                               val = (__force u32)cpu_to_be32(*(u32 *)&ecc[1]);
                                elm_write_reg(info, offset, val);
 
                                /* syndrome fragment 3 = ecc[0B] */
@@ -197,35 +197,35 @@ static void elm_load_syndrome(struct elm_info *info,
                                break;
                        case BCH4_ECC:
                                /* syndrome fragment 0 = ecc[20-52b] bits */
-                               val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) |
+                               val = ((__force u32)cpu_to_be32(*(u32 *)&ecc[3]) >> 4) |
                                        ((ecc[2] & 0xf) << 28);
                                elm_write_reg(info, offset, val);
 
                                /* syndrome fragment 1 = ecc[0-20b] bits */
                                offset += 4;
-                               val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
+                               val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 12;
                                elm_write_reg(info, offset, val);
                                break;
                        case BCH16_ECC:
-                               val = cpu_to_be32(*(u32 *) &ecc[22]);
+                               val = (__force u32)cpu_to_be32(*(u32 *)&ecc[22]);
                                elm_write_reg(info, offset, val);
                                offset += 4;
-                               val = cpu_to_be32(*(u32 *) &ecc[18]);
+                               val = (__force u32)cpu_to_be32(*(u32 *)&ecc[18]);
                                elm_write_reg(info, offset, val);
                                offset += 4;
-                               val = cpu_to_be32(*(u32 *) &ecc[14]);
+                               val = (__force u32)cpu_to_be32(*(u32 *)&ecc[14]);
                                elm_write_reg(info, offset, val);
                                offset += 4;
-                               val = cpu_to_be32(*(u32 *) &ecc[10]);
+                               val = (__force u32)cpu_to_be32(*(u32 *)&ecc[10]);
                                elm_write_reg(info, offset, val);
                                offset += 4;
-                               val = cpu_to_be32(*(u32 *) &ecc[6]);
+                               val = (__force u32)cpu_to_be32(*(u32 *)&ecc[6]);
                                elm_write_reg(info, offset, val);
                                offset += 4;
-                               val = cpu_to_be32(*(u32 *) &ecc[2]);
+                               val = (__force u32)cpu_to_be32(*(u32 *)&ecc[2]);
                                elm_write_reg(info, offset, val);
                                offset += 4;
-                               val = cpu_to_be32(*(u32 *) &ecc[0]) >> 16;
+                               val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 16;
                                elm_write_reg(info, offset, val);
                                break;
                        default:
index 2312e27..5a04680 100644 (file)
@@ -562,9 +562,10 @@ static int rk_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
                 *    BBM  OOB1 OOB2 OOB3 |......|  PA0  PA1  PA2  PA3
                 *
                 * The rk_nfc_ooblayout_free() function already has reserved
-                * these 4 bytes with:
+                * these 4 bytes together with 2 bytes for BBM
+                * by reducing it's length:
                 *
-                * oob_region->offset = NFC_SYS_DATA_SIZE + 2;
+                * oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2;
                 */
                if (!i)
                        memcpy(rk_nfc_oob_ptr(chip, i),
@@ -597,7 +598,7 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
        int pages_per_blk = mtd->erasesize / mtd->writesize;
        int ret = 0, i, boot_rom_mode = 0;
        dma_addr_t dma_data, dma_oob;
-       u32 reg;
+       u32 tmp;
        u8 *oob;
 
        nand_prog_page_begin_op(chip, page, 0, NULL, 0);
@@ -624,6 +625,13 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
         *
         *   0xFF 0xFF 0xFF 0xFF | BBM OOB1 OOB2 OOB3 | ...
         *
+        * The code here just swaps the first 4 bytes with the last
+        * 4 bytes without losing any data.
+        *
+        * The chip->oob_poi data layout:
+        *
+        *    BBM  OOB1 OOB2 OOB3 |......|  PA0  PA1  PA2  PA3
+        *
         * Configure the ECC algorithm supported by the boot ROM.
         */
        if ((page < (pages_per_blk * rknand->boot_blks)) &&
@@ -634,21 +642,17 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
        }
 
        for (i = 0; i < ecc->steps; i++) {
-               if (!i) {
-                       reg = 0xFFFFFFFF;
-               } else {
+               if (!i)
+                       oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE;
+               else
                        oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
-                       reg = oob[0] | oob[1] << 8 | oob[2] << 16 |
-                             oob[3] << 24;
-               }
 
-               if (!i && boot_rom_mode)
-                       reg = (page & (pages_per_blk - 1)) * 4;
+               tmp = oob[0] | oob[1] << 8 | oob[2] << 16 | oob[3] << 24;
 
                if (nfc->cfg->type == NFC_V9)
-                       nfc->oob_buf[i] = reg;
+                       nfc->oob_buf[i] = tmp;
                else
-                       nfc->oob_buf[i * (oob_step / 4)] = reg;
+                       nfc->oob_buf[i * (oob_step / 4)] = tmp;
        }
 
        dma_data = dma_map_single(nfc->dev, (void *)nfc->page_buf,
@@ -811,12 +815,17 @@ static int rk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *buf, int oob_on,
                goto timeout_err;
        }
 
-       for (i = 1; i < ecc->steps; i++) {
-               oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
+       for (i = 0; i < ecc->steps; i++) {
+               if (!i)
+                       oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE;
+               else
+                       oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
+
                if (nfc->cfg->type == NFC_V9)
                        tmp = nfc->oob_buf[i];
                else
                        tmp = nfc->oob_buf[i * (oob_step / 4)];
+
                *oob++ = (u8)tmp;
                *oob++ = (u8)(tmp >> 8);
                *oob++ = (u8)(tmp >> 16);
@@ -933,12 +942,8 @@ static int rk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
        if (section)
                return -ERANGE;
 
-       /*
-        * The beginning of the OOB area stores the reserved data for the NFC,
-        * the size of the reserved data is NFC_SYS_DATA_SIZE bytes.
-        */
        oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2;
-       oob_region->offset = NFC_SYS_DATA_SIZE + 2;
+       oob_region->offset = 2;
 
        return 0;
 }
index 7380b1e..a80427c 100644 (file)
@@ -73,7 +73,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
 {
        struct nand_device *nand = spinand_to_nand(spinand);
        u8 mbf = 0;
-       struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf);
+       struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
 
        switch (status & STATUS_ECC_MASK) {
        case STATUS_ECC_NO_BITFLIPS:
@@ -92,7 +92,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
                if (spi_mem_exec_op(spinand->spimem, &op))
                        return nanddev_get_ecc_conf(nand)->strength;
 
-               mbf >>= 4;
+               mbf = *(spinand->scratchbuf) >> 4;
 
                if (WARN_ON(mbf > nanddev_get_ecc_conf(nand)->strength || !mbf))
                        return nanddev_get_ecc_conf(nand)->strength;
index 3ad58cd..f507e37 100644 (file)
@@ -108,7 +108,7 @@ static int w25n02kv_ecc_get_status(struct spinand_device *spinand,
 {
        struct nand_device *nand = spinand_to_nand(spinand);
        u8 mbf = 0;
-       struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf);
+       struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
 
        switch (status & STATUS_ECC_MASK) {
        case STATUS_ECC_NO_BITFLIPS:
@@ -126,7 +126,7 @@ static int w25n02kv_ecc_get_status(struct spinand_device *spinand,
                if (spi_mem_exec_op(spinand->spimem, &op))
                        return nanddev_get_ecc_conf(nand)->strength;
 
-               mbf >>= 4;
+               mbf = *(spinand->scratchbuf) >> 4;
 
                if (WARN_ON(mbf > nanddev_get_ecc_conf(nand)->strength || !mbf))
                        return nanddev_get_ecc_conf(nand)->strength;
index 36876aa..15f9a80 100644 (file)
@@ -361,7 +361,7 @@ static int cypress_nor_determine_addr_mode_by_sr1(struct spi_nor *nor,
  */
 static int cypress_nor_set_addr_mode_nbytes(struct spi_nor *nor)
 {
-       struct spi_mem_op op = {};
+       struct spi_mem_op op;
        u8 addr_mode;
        int ret;
 
@@ -492,7 +492,7 @@ s25fs256t_post_bfpt_fixup(struct spi_nor *nor,
                          const struct sfdp_parameter_header *bfpt_header,
                          const struct sfdp_bfpt *bfpt)
 {
-       struct spi_mem_op op = {};
+       struct spi_mem_op op;
        int ret;
 
        ret = cypress_nor_set_addr_mode_nbytes(nor);
index cde253d..72374b0 100644 (file)
@@ -1436,7 +1436,9 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
        if (IS_ERR(priv->clk))
                return PTR_ERR(priv->clk);
 
-       clk_prepare_enable(priv->clk);
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               return ret;
 
        priv->clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv");
        if (IS_ERR(priv->clk_mdiv)) {
@@ -1444,7 +1446,9 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
                goto out_clk;
        }
 
-       clk_prepare_enable(priv->clk_mdiv);
+       ret = clk_prepare_enable(priv->clk_mdiv);
+       if (ret)
+               goto out_clk;
 
        ret = bcm_sf2_sw_rst(priv);
        if (ret) {
index b18cd17..6c0623f 100644 (file)
@@ -635,10 +635,9 @@ static const struct regmap_range ksz9477_valid_regs[] = {
        regmap_reg_range(0x1030, 0x1030),
        regmap_reg_range(0x1100, 0x1115),
        regmap_reg_range(0x111a, 0x111f),
-       regmap_reg_range(0x1122, 0x1127),
-       regmap_reg_range(0x112a, 0x112b),
-       regmap_reg_range(0x1136, 0x1139),
-       regmap_reg_range(0x113e, 0x113f),
+       regmap_reg_range(0x1120, 0x112b),
+       regmap_reg_range(0x1134, 0x113b),
+       regmap_reg_range(0x113c, 0x113f),
        regmap_reg_range(0x1400, 0x1401),
        regmap_reg_range(0x1403, 0x1403),
        regmap_reg_range(0x1410, 0x1417),
@@ -669,10 +668,9 @@ static const struct regmap_range ksz9477_valid_regs[] = {
        regmap_reg_range(0x2030, 0x2030),
        regmap_reg_range(0x2100, 0x2115),
        regmap_reg_range(0x211a, 0x211f),
-       regmap_reg_range(0x2122, 0x2127),
-       regmap_reg_range(0x212a, 0x212b),
-       regmap_reg_range(0x2136, 0x2139),
-       regmap_reg_range(0x213e, 0x213f),
+       regmap_reg_range(0x2120, 0x212b),
+       regmap_reg_range(0x2134, 0x213b),
+       regmap_reg_range(0x213c, 0x213f),
        regmap_reg_range(0x2400, 0x2401),
        regmap_reg_range(0x2403, 0x2403),
        regmap_reg_range(0x2410, 0x2417),
@@ -703,10 +701,9 @@ static const struct regmap_range ksz9477_valid_regs[] = {
        regmap_reg_range(0x3030, 0x3030),
        regmap_reg_range(0x3100, 0x3115),
        regmap_reg_range(0x311a, 0x311f),
-       regmap_reg_range(0x3122, 0x3127),
-       regmap_reg_range(0x312a, 0x312b),
-       regmap_reg_range(0x3136, 0x3139),
-       regmap_reg_range(0x313e, 0x313f),
+       regmap_reg_range(0x3120, 0x312b),
+       regmap_reg_range(0x3134, 0x313b),
+       regmap_reg_range(0x313c, 0x313f),
        regmap_reg_range(0x3400, 0x3401),
        regmap_reg_range(0x3403, 0x3403),
        regmap_reg_range(0x3410, 0x3417),
@@ -737,10 +734,9 @@ static const struct regmap_range ksz9477_valid_regs[] = {
        regmap_reg_range(0x4030, 0x4030),
        regmap_reg_range(0x4100, 0x4115),
        regmap_reg_range(0x411a, 0x411f),
-       regmap_reg_range(0x4122, 0x4127),
-       regmap_reg_range(0x412a, 0x412b),
-       regmap_reg_range(0x4136, 0x4139),
-       regmap_reg_range(0x413e, 0x413f),
+       regmap_reg_range(0x4120, 0x412b),
+       regmap_reg_range(0x4134, 0x413b),
+       regmap_reg_range(0x413c, 0x413f),
        regmap_reg_range(0x4400, 0x4401),
        regmap_reg_range(0x4403, 0x4403),
        regmap_reg_range(0x4410, 0x4417),
@@ -771,10 +767,9 @@ static const struct regmap_range ksz9477_valid_regs[] = {
        regmap_reg_range(0x5030, 0x5030),
        regmap_reg_range(0x5100, 0x5115),
        regmap_reg_range(0x511a, 0x511f),
-       regmap_reg_range(0x5122, 0x5127),
-       regmap_reg_range(0x512a, 0x512b),
-       regmap_reg_range(0x5136, 0x5139),
-       regmap_reg_range(0x513e, 0x513f),
+       regmap_reg_range(0x5120, 0x512b),
+       regmap_reg_range(0x5134, 0x513b),
+       regmap_reg_range(0x513c, 0x513f),
        regmap_reg_range(0x5400, 0x5401),
        regmap_reg_range(0x5403, 0x5403),
        regmap_reg_range(0x5410, 0x5417),
index e5b54e6..1eb490c 100644 (file)
@@ -633,12 +633,13 @@ tx_kick_pending:
        return NETDEV_TX_OK;
 }
 
-static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 {
        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
        struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
        u16 cons = txr->tx_cons;
        struct pci_dev *pdev = bp->pdev;
+       int nr_pkts = bnapi->tx_pkts;
        int i;
        unsigned int tx_bytes = 0;
 
@@ -688,6 +689,7 @@ next_tx_int:
                dev_kfree_skb_any(skb);
        }
 
+       bnapi->tx_pkts = 0;
        WRITE_ONCE(txr->tx_cons, cons);
 
        __netif_txq_completed_wake(txq, nr_pkts, tx_bytes,
@@ -697,17 +699,24 @@ next_tx_int:
 
 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
                                         struct bnxt_rx_ring_info *rxr,
+                                        unsigned int *offset,
                                         gfp_t gfp)
 {
        struct device *dev = &bp->pdev->dev;
        struct page *page;
 
-       page = page_pool_dev_alloc_pages(rxr->page_pool);
+       if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+               page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
+                                               BNXT_RX_PAGE_SIZE);
+       } else {
+               page = page_pool_dev_alloc_pages(rxr->page_pool);
+               *offset = 0;
+       }
        if (!page)
                return NULL;
 
-       *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
-                                     DMA_ATTR_WEAK_ORDERING);
+       *mapping = dma_map_page_attrs(dev, page, *offset, BNXT_RX_PAGE_SIZE,
+                                     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(dev, *mapping)) {
                page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
@@ -747,15 +756,16 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
        dma_addr_t mapping;
 
        if (BNXT_RX_PAGE_MODE(bp)) {
+               unsigned int offset;
                struct page *page =
-                       __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+                       __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
 
                if (!page)
                        return -ENOMEM;
 
                mapping += bp->rx_dma_offset;
                rx_buf->data = page;
-               rx_buf->data_ptr = page_address(page) + bp->rx_offset;
+               rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
        } else {
                u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
 
@@ -815,7 +825,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
        unsigned int offset = 0;
 
        if (BNXT_RX_PAGE_MODE(bp)) {
-               page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+               page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
 
                if (!page)
                        return -ENOMEM;
@@ -962,15 +972,15 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
-       dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
-                            DMA_ATTR_WEAK_ORDERING);
-       skb = build_skb(page_address(page), PAGE_SIZE);
+       dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+                            bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+       skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
        if (!skb) {
                page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
        }
        skb_mark_for_recycle(skb);
-       skb_reserve(skb, bp->rx_dma_offset);
+       skb_reserve(skb, bp->rx_offset);
        __skb_put(skb, len);
 
        return skb;
@@ -996,8 +1006,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
-       dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
-                            DMA_ATTR_WEAK_ORDERING);
+       dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+                            bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
 
        if (unlikely(!payload))
                payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -1010,7 +1020,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 
        skb_mark_for_recycle(skb);
        off = (void *)data_ptr - page_address(page);
-       skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
+       skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
        memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
               payload + NET_IP_ALIGN);
 
@@ -1141,7 +1151,7 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
 
        skb->data_len += total_frag_len;
        skb->len += total_frag_len;
-       skb->truesize += PAGE_SIZE * agg_bufs;
+       skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
        return skb;
 }
 
@@ -2569,12 +2579,11 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        return rx_pkts;
 }
 
-static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
+static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
+                                 int budget)
 {
-       if (bnapi->tx_pkts) {
-               bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
-               bnapi->tx_pkts = 0;
-       }
+       if (bnapi->tx_pkts)
+               bnapi->tx_int(bp, bnapi, budget);
 
        if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
                struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
@@ -2603,7 +2612,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
         */
        bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
 
-       __bnxt_poll_work_done(bp, bnapi);
+       __bnxt_poll_work_done(bp, bnapi, budget);
        return rx_pkts;
 }
 
@@ -2734,7 +2743,7 @@ static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 }
 
 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
-                                u64 dbr_type)
+                                u64 dbr_type, int budget)
 {
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        int i;
@@ -2750,7 +2759,7 @@ static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
                        cpr2->had_work_done = 0;
                }
        }
-       __bnxt_poll_work_done(bp, bnapi);
+       __bnxt_poll_work_done(bp, bnapi, budget);
 }
 
 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
@@ -2780,7 +2789,8 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
                        if (cpr->has_more_work)
                                break;
 
-                       __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
+                       __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
+                                            budget);
                        cpr->cp_raw_cons = raw_cons;
                        if (napi_complete_done(napi, work_done))
                                BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
@@ -2810,7 +2820,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
                }
                raw_cons = NEXT_RAW_CMP(raw_cons);
        }
-       __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
+       __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
        if (raw_cons != cpr->cp_raw_cons) {
                cpr->cp_raw_cons = raw_cons;
                BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
@@ -2943,8 +2953,8 @@ skip_rx_tpa_free:
                rx_buf->data = NULL;
                if (BNXT_RX_PAGE_MODE(bp)) {
                        mapping -= bp->rx_dma_offset;
-                       dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
-                                            bp->rx_dir,
+                       dma_unmap_page_attrs(&pdev->dev, mapping,
+                                            BNXT_RX_PAGE_SIZE, bp->rx_dir,
                                             DMA_ATTR_WEAK_ORDERING);
                        page_pool_recycle_direct(rxr->page_pool, data);
                } else {
@@ -3213,6 +3223,8 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
        pp.napi = &rxr->bnapi->napi;
        pp.dev = &bp->pdev->dev;
        pp.dma_dir = DMA_BIDIRECTIONAL;
+       if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
+               pp.flags |= PP_FLAG_PAGE_FRAG;
 
        rxr->page_pool = page_pool_create(&pp);
        if (IS_ERR(rxr->page_pool)) {
@@ -3989,26 +4001,29 @@ void bnxt_set_ring_params(struct bnxt *bp)
  */
 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
 {
+       struct net_device *dev = bp->dev;
+
        if (page_mode) {
                bp->flags &= ~BNXT_FLAG_AGG_RINGS;
                bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
 
-               if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+               if (bp->xdp_prog->aux->xdp_has_frags)
+                       dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
+               else
+                       dev->max_mtu =
+                               min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
+               if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
                        bp->flags |= BNXT_FLAG_JUMBO;
                        bp->rx_skb_func = bnxt_rx_multi_page_skb;
-                       bp->dev->max_mtu =
-                               min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
                } else {
                        bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
                        bp->rx_skb_func = bnxt_rx_page_skb;
-                       bp->dev->max_mtu =
-                               min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
                }
                bp->rx_dir = DMA_BIDIRECTIONAL;
                /* Disable LRO or GRO_HW */
-               netdev_update_features(bp->dev);
+               netdev_update_features(dev);
        } else {
-               bp->dev->max_mtu = bp->max_mtu;
+               dev->max_mtu = bp->max_mtu;
                bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
                bp->rx_dir = DMA_FROM_DEVICE;
                bp->rx_skb_func = bnxt_rx_skb;
@@ -9429,6 +9444,8 @@ static void bnxt_enable_napi(struct bnxt *bp)
                        cpr->sw_stats.rx.rx_resets++;
                bnapi->in_reset = false;
 
+               bnapi->tx_pkts = 0;
+
                if (bnapi->rx_ring) {
                        INIT_WORK(&cpr->dim.work, bnxt_dim_work);
                        cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
index 080e734..bb95c3d 100644 (file)
@@ -1005,7 +1005,7 @@ struct bnxt_napi {
        struct bnxt_tx_ring_info        *tx_ring;
 
        void                    (*tx_int)(struct bnxt *, struct bnxt_napi *,
-                                         int);
+                                         int budget);
        int                     tx_pkts;
        u8                      events;
 
index 4efa5fe..fb43232 100644 (file)
@@ -125,16 +125,20 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
        dma_unmap_len_set(tx_buf, len, 0);
 }
 
-void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 {
        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        bool rx_doorbell_needed = false;
+       int nr_pkts = bnapi->tx_pkts;
        struct bnxt_sw_tx_bd *tx_buf;
        u16 tx_cons = txr->tx_cons;
        u16 last_tx_cons = tx_cons;
        int i, j, frags;
 
+       if (!budget)
+               return;
+
        for (i = 0; i < nr_pkts; i++) {
                tx_buf = &txr->tx_buf_ring[tx_cons];
 
@@ -161,6 +165,8 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
                }
                tx_cons = NEXT_TX(tx_cons);
        }
+
+       bnapi->tx_pkts = 0;
        WRITE_ONCE(txr->tx_cons, tx_cons);
        if (rx_doorbell_needed) {
                tx_buf = &txr->tx_buf_ring[last_tx_cons];
@@ -180,8 +186,8 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                        u16 cons, u8 *data_ptr, unsigned int len,
                        struct xdp_buff *xdp)
 {
+       u32 buflen = BNXT_RX_PAGE_SIZE;
        struct bnxt_sw_rx_bd *rx_buf;
-       u32 buflen = PAGE_SIZE;
        struct pci_dev *pdev;
        dma_addr_t mapping;
        u32 offset;
@@ -297,7 +303,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
                rx_buf = &rxr->rx_buf_ring[cons];
                mapping = rx_buf->mapping - bp->rx_dma_offset;
                dma_unmap_page_attrs(&pdev->dev, mapping,
-                                    PAGE_SIZE, bp->rx_dir,
+                                    BNXT_RX_PAGE_SIZE, bp->rx_dir,
                                     DMA_ATTR_WEAK_ORDERING);
 
                /* if we are unable to allocate a new buffer, abort and reuse */
@@ -480,7 +486,7 @@ bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
        }
        xdp_update_skb_shared_info(skb, num_frags,
                                   sinfo->xdp_frags_size,
-                                  PAGE_SIZE * sinfo->nr_frags,
+                                  BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
                                   xdp_buff_is_frag_pfmemalloc(xdp));
        return skb;
 }
index ea430d6..5e412c5 100644 (file)
@@ -16,7 +16,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
                                   struct bnxt_tx_ring_info *txr,
                                   dma_addr_t mapping, u32 len,
                                   struct xdp_buff *xdp);
-void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
+void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget);
 bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
                 struct xdp_buff xdp, struct page *page, u8 **data_ptr,
                 unsigned int *len, u8 *event);
index f02d444..cf92c39 100644 (file)
@@ -8813,6 +8813,7 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
 {
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_pf *pf = np->vsi->back;
+       bool locked = false;
        int err;
 
        switch (type) {
@@ -8822,10 +8823,27 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
                                                  ice_setup_tc_block_cb,
                                                  np, np, true);
        case TC_SETUP_QDISC_MQPRIO:
+               if (pf->adev) {
+                       mutex_lock(&pf->adev_mutex);
+                       device_lock(&pf->adev->dev);
+                       locked = true;
+                       if (pf->adev->dev.driver) {
+                               netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
+                               err = -EBUSY;
+                               goto adev_unlock;
+                       }
+               }
+
                /* setup traffic classifier for receive side */
                mutex_lock(&pf->tc_mutex);
                err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
                mutex_unlock(&pf->tc_mutex);
+
+adev_unlock:
+               if (locked) {
+                       device_unlock(&pf->adev->dev);
+                       mutex_unlock(&pf->adev_mutex);
+               }
                return err;
        default:
                return -EOPNOTSUPP;
index 2b9335c..8537578 100644 (file)
@@ -1302,11 +1302,10 @@ static int korina_probe(struct platform_device *pdev)
        else if (of_get_ethdev_address(pdev->dev.of_node, dev) < 0)
                eth_hw_addr_random(dev);
 
-       clk = devm_clk_get_optional(&pdev->dev, "mdioclk");
+       clk = devm_clk_get_optional_enabled(&pdev->dev, "mdioclk");
        if (IS_ERR(clk))
                return PTR_ERR(clk);
        if (clk) {
-               clk_prepare_enable(clk);
                lp->mii_clock_freq = clk_get_rate(clk);
        } else {
                lp->mii_clock_freq = 200000000; /* max possible input clk */
index 035ead7..dab61cc 100644 (file)
@@ -98,6 +98,9 @@ int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox)
        writeq(OCTEP_CTRL_MBOX_STATUS_INIT,
               OCTEP_CTRL_MBOX_INFO_HOST_STATUS(mbox->barmem));
 
+       mutex_init(&mbox->h2fq_lock);
+       mutex_init(&mbox->f2hq_lock);
+
        mbox->h2fq.sz = readl(OCTEP_CTRL_MBOX_H2FQ_SZ(mbox->barmem));
        mbox->h2fq.hw_prod = OCTEP_CTRL_MBOX_H2FQ_PROD(mbox->barmem);
        mbox->h2fq.hw_cons = OCTEP_CTRL_MBOX_H2FQ_CONS(mbox->barmem);
index f328d95..35857dc 100644 (file)
@@ -727,7 +727,8 @@ pick_fw_ver:
 
        err = request_firmware_direct(&fw->bin, fw_path, fw->dev.dev);
        if (err) {
-               if (ver_maj == PRESTERA_SUPP_FW_MAJ_VER) {
+               if (ver_maj != PRESTERA_PREV_FW_MAJ_VER ||
+                   ver_min != PRESTERA_PREV_FW_MIN_VER) {
                        ver_maj = PRESTERA_PREV_FW_MAJ_VER;
                        ver_min = PRESTERA_PREV_FW_MIN_VER;
 
index f0c3464..0c88cf4 100644 (file)
@@ -1030,9 +1030,6 @@ int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv,
        int out_index;
        int err = 0;
 
-       if (!mlx5e_is_eswitch_flow(flow))
-               return 0;
-
        parse_attr = attr->parse_attr;
        esw_attr = attr->esw_attr;
        *vf_tun = false;
index d97e6df..b8dd744 100644 (file)
@@ -323,8 +323,11 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
        net_prefetch(mxbuf->xdp.data);
 
        prog = rcu_dereference(rq->xdp_prog);
-       if (likely(prog && mlx5e_xdp_handle(rq, prog, mxbuf)))
+       if (likely(prog && mlx5e_xdp_handle(rq, prog, mxbuf))) {
+               if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
+                       wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
                return NULL; /* page/packet was consumed by XDP */
+       }
 
        /* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
         * will be handled by mlx5e_free_rx_wqe.
index dbe87bf..832d36b 100644 (file)
@@ -808,9 +808,9 @@ static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upsp
        }
 
        if (upspec->sport) {
-               MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_dport,
+               MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_sport,
                         upspec->sport_mask);
-               MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_dport, upspec->sport);
+               MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_sport, upspec->sport);
        }
 }
 
index eab5bc7..8d995e3 100644 (file)
@@ -58,7 +58,9 @@ static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
 
        trailer_len = alen + plen + 2;
 
-       pskb_trim(skb, skb->len - trailer_len);
+       ret = pskb_trim(skb, skb->len - trailer_len);
+       if (unlikely(ret))
+               return ret;
        if (skb->protocol == htons(ETH_P_IP)) {
                ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
                ip_send_check(ipv4hdr);
index cf704f1..984fa04 100644 (file)
@@ -188,7 +188,6 @@ static void mlx5e_tls_debugfs_init(struct mlx5e_tls *tls,
 
 int mlx5e_ktls_init(struct mlx5e_priv *priv)
 {
-       struct mlx5_crypto_dek_pool *dek_pool;
        struct mlx5e_tls *tls;
 
        if (!mlx5e_is_ktls_device(priv->mdev))
@@ -199,12 +198,6 @@ int mlx5e_ktls_init(struct mlx5e_priv *priv)
                return -ENOMEM;
        tls->mdev = priv->mdev;
 
-       dek_pool = mlx5_crypto_dek_pool_create(priv->mdev, MLX5_ACCEL_OBJ_TLS_KEY);
-       if (IS_ERR(dek_pool)) {
-               kfree(tls);
-               return PTR_ERR(dek_pool);
-       }
-       tls->dek_pool = dek_pool;
        priv->tls = tls;
 
        mlx5e_tls_debugfs_init(tls, priv->dfs_root);
@@ -222,7 +215,6 @@ void mlx5e_ktls_cleanup(struct mlx5e_priv *priv)
        debugfs_remove_recursive(tls->debugfs.dfs);
        tls->debugfs.dfs = NULL;
 
-       mlx5_crypto_dek_pool_destroy(tls->dek_pool);
        kfree(priv->tls);
        priv->tls = NULL;
 }
index efb2cf7..d61be26 100644 (file)
@@ -908,28 +908,51 @@ static void mlx5e_tls_tx_debugfs_init(struct mlx5e_tls *tls,
 
 int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
 {
+       struct mlx5_crypto_dek_pool *dek_pool;
        struct mlx5e_tls *tls = priv->tls;
+       int err;
+
+       if (!mlx5e_is_ktls_device(priv->mdev))
+               return 0;
+
+       /* DEK pool could be used by either or both of TX and RX. But we have to
+        * put the creation here to avoid syndrome when doing devlink reload.
+        */
+       dek_pool = mlx5_crypto_dek_pool_create(priv->mdev, MLX5_ACCEL_OBJ_TLS_KEY);
+       if (IS_ERR(dek_pool))
+               return PTR_ERR(dek_pool);
+       tls->dek_pool = dek_pool;
 
        if (!mlx5e_is_ktls_tx(priv->mdev))
                return 0;
 
        priv->tls->tx_pool = mlx5e_tls_tx_pool_init(priv->mdev, &priv->tls->sw_stats);
-       if (!priv->tls->tx_pool)
-               return -ENOMEM;
+       if (!priv->tls->tx_pool) {
+               err = -ENOMEM;
+               goto err_tx_pool_init;
+       }
 
        mlx5e_tls_tx_debugfs_init(tls, tls->debugfs.dfs);
 
        return 0;
+
+err_tx_pool_init:
+       mlx5_crypto_dek_pool_destroy(dek_pool);
+       return err;
 }
 
 void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
 {
        if (!mlx5e_is_ktls_tx(priv->mdev))
-               return;
+               goto dek_pool_destroy;
 
        debugfs_remove_recursive(priv->tls->debugfs.dfs_tx);
        priv->tls->debugfs.dfs_tx = NULL;
 
        mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool);
        priv->tls->tx_pool = NULL;
+
+dek_pool_destroy:
+       if (mlx5e_is_ktls_device(priv->mdev))
+               mlx5_crypto_dek_pool_destroy(priv->tls->dek_pool);
 }
index 7fc901a..414e285 100644 (file)
@@ -161,6 +161,7 @@ static int macsec_fs_tx_create_crypto_table_groups(struct mlx5e_flow_table *ft)
 
        if (!in) {
                kfree(ft->g);
+               ft->g = NULL;
                return -ENOMEM;
        }
 
index 933a777..5aa51d7 100644 (file)
@@ -135,6 +135,16 @@ static void arfs_del_rules(struct mlx5e_flow_steering *fs);
 
 int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs)
 {
+       /* Moving to switchdev mode, fs->arfs is freed by mlx5e_nic_profile
+        * cleanup_rx callback and it is not recreated when
+        * mlx5e_uplink_rep_profile is loaded as mlx5e_create_flow_steering()
+        * is not called by the uplink_rep profile init_rx callback. Thus, if
+        * ntuple is set, moving to switchdev flow will enter this function
+        * with fs->arfs nullified.
+        */
+       if (!mlx5e_fs_get_arfs(fs))
+               return 0;
+
        arfs_del_rules(fs);
 
        return arfs_disable(fs);
index defb1ef..1c82011 100644 (file)
@@ -1036,7 +1036,23 @@ static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_s
        return err;
 }
 
-static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
+static void mlx5e_flush_rq_cq(struct mlx5e_rq *rq)
+{
+       struct mlx5_cqwq *cqwq = &rq->cq.wq;
+       struct mlx5_cqe64 *cqe;
+
+       if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state)) {
+               while ((cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq)))
+                       mlx5_cqwq_pop(cqwq);
+       } else {
+               while ((cqe = mlx5_cqwq_get_cqe(cqwq)))
+                       mlx5_cqwq_pop(cqwq);
+       }
+
+       mlx5_cqwq_update_db_record(cqwq);
+}
+
+int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
 {
        struct net_device *dev = rq->netdev;
        int err;
@@ -1046,6 +1062,10 @@ static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
                netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
                return err;
        }
+
+       mlx5e_free_rx_descs(rq);
+       mlx5e_flush_rq_cq(rq);
+
        err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
        if (err) {
                netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
@@ -1055,13 +1075,6 @@ static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
        return 0;
 }
 
-int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
-{
-       mlx5e_free_rx_descs(rq);
-
-       return mlx5e_rq_to_ready(rq, curr_state);
-}
-
 static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
 {
        struct mlx5_core_dev *mdev = rq->mdev;
index 152b621..99b3843 100644 (file)
@@ -1012,7 +1012,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
        err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
        if (err) {
                mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
-               return err;
+               goto err_rx_res_free;
        }
 
        err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
@@ -1046,6 +1046,7 @@ err_destroy_rx_res:
        mlx5e_rx_res_destroy(priv->rx_res);
 err_close_drop_rq:
        mlx5e_close_drop_rq(&priv->drop_rq);
+err_rx_res_free:
        mlx5e_rx_res_free(priv->rx_res);
        priv->rx_res = NULL;
 err_free_fs:
@@ -1159,6 +1160,10 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
                return err;
        }
 
+       err = mlx5e_rep_neigh_init(rpriv);
+       if (err)
+               goto err_neigh_init;
+
        if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
                err = mlx5e_init_uplink_rep_tx(rpriv);
                if (err)
@@ -1175,6 +1180,8 @@ err_ht_init:
        if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
                mlx5e_cleanup_uplink_rep_tx(rpriv);
 err_init_tx:
+       mlx5e_rep_neigh_cleanup(rpriv);
+err_neigh_init:
        mlx5e_destroy_tises(priv);
        return err;
 }
@@ -1188,22 +1195,17 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
        if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
                mlx5e_cleanup_uplink_rep_tx(rpriv);
 
+       mlx5e_rep_neigh_cleanup(rpriv);
        mlx5e_destroy_tises(priv);
 }
 
 static void mlx5e_rep_enable(struct mlx5e_priv *priv)
 {
-       struct mlx5e_rep_priv *rpriv = priv->ppriv;
-
        mlx5e_set_netdev_mtu_boundaries(priv);
-       mlx5e_rep_neigh_init(rpriv);
 }
 
 static void mlx5e_rep_disable(struct mlx5e_priv *priv)
 {
-       struct mlx5e_rep_priv *rpriv = priv->ppriv;
-
-       mlx5e_rep_neigh_cleanup(rpriv);
 }
 
 static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
@@ -1253,7 +1255,6 @@ static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event
 
 static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
 {
-       struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct net_device *netdev = priv->netdev;
        struct mlx5_core_dev *mdev = priv->mdev;
        u16 max_mtu;
@@ -1275,7 +1276,6 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
        mlx5_notifier_register(mdev, &priv->events_nb);
        mlx5e_dcbnl_initialize(priv);
        mlx5e_dcbnl_init_app(priv);
-       mlx5e_rep_neigh_init(rpriv);
        mlx5e_rep_bridge_init(priv);
 
        netdev->wanted_features |= NETIF_F_HW_TC;
@@ -1290,7 +1290,6 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
 
 static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
 {
-       struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_core_dev *mdev = priv->mdev;
 
        rtnl_lock();
@@ -1300,7 +1299,6 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
        rtnl_unlock();
 
        mlx5e_rep_bridge_cleanup(priv);
-       mlx5e_rep_neigh_cleanup(rpriv);
        mlx5e_dcbnl_delete_app(priv);
        mlx5_notifier_unregister(mdev, &priv->events_nb);
        mlx5e_rep_tc_disable(priv);
index 8d0a3f6..9237763 100644 (file)
@@ -1725,6 +1725,19 @@ verify_attr_actions(u32 actions, struct netlink_ext_ack *extack)
        return 0;
 }
 
+static bool
+has_encap_dests(struct mlx5_flow_attr *attr)
+{
+       struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+       int out_index;
+
+       for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
+               if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
+                       return true;
+
+       return false;
+}
+
 static int
 post_process_attr(struct mlx5e_tc_flow *flow,
                  struct mlx5_flow_attr *attr,
@@ -1737,9 +1750,11 @@ post_process_attr(struct mlx5e_tc_flow *flow,
        if (err)
                goto err_out;
 
-       err = mlx5e_tc_tun_encap_dests_set(flow->priv, flow, attr, extack, &vf_tun);
-       if (err)
-               goto err_out;
+       if (mlx5e_is_eswitch_flow(flow) && has_encap_dests(attr)) {
+               err = mlx5e_tc_tun_encap_dests_set(flow->priv, flow, attr, extack, &vf_tun);
+               if (err)
+                       goto err_out;
+       }
 
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
                err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr);
index b6a45ef..dbd7cbe 100644 (file)
@@ -64,7 +64,7 @@ void mlx5_esw_bridge_debugfs_init(struct net_device *br_netdev, struct mlx5_esw_
 
        bridge->debugfs_dir = debugfs_create_dir(br_netdev->name,
                                                 bridge->br_offloads->debugfs_root);
-       debugfs_create_file("fdb", 0444, bridge->debugfs_dir, bridge,
+       debugfs_create_file("fdb", 0400, bridge->debugfs_dir, bridge,
                            &mlx5_esw_bridge_debugfs_fops);
 }
 
index bdfe609..e59380e 100644 (file)
@@ -1436,7 +1436,6 @@ esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
 
        esw_init_chains_offload_flags(esw, &attr.flags);
        attr.ns = MLX5_FLOW_NAMESPACE_FDB;
-       attr.fs_base_prio = FDB_TC_OFFLOAD;
        attr.max_grp_num = esw->params.large_group_num;
        attr.default_ft = miss_fdb;
        attr.mapping = esw->offloads.reg_c0_obj_pool;
@@ -2779,9 +2778,9 @@ static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
                                         struct mlx5_eswitch *peer_esw,
                                         bool pair)
 {
-       u8 peer_idx = mlx5_get_dev_index(peer_esw->dev);
+       u16 peer_vhca_id = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
+       u16 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
        struct mlx5_flow_root_namespace *peer_ns;
-       u8 idx = mlx5_get_dev_index(esw->dev);
        struct mlx5_flow_root_namespace *ns;
        int err;
 
@@ -2789,18 +2788,18 @@ static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
        ns = esw->dev->priv.steering->fdb_root_ns;
 
        if (pair) {
-               err = mlx5_flow_namespace_set_peer(ns, peer_ns, peer_idx);
+               err = mlx5_flow_namespace_set_peer(ns, peer_ns, peer_vhca_id);
                if (err)
                        return err;
 
-               err = mlx5_flow_namespace_set_peer(peer_ns, ns, idx);
+               err = mlx5_flow_namespace_set_peer(peer_ns, ns, vhca_id);
                if (err) {
-                       mlx5_flow_namespace_set_peer(ns, NULL, peer_idx);
+                       mlx5_flow_namespace_set_peer(ns, NULL, peer_vhca_id);
                        return err;
                }
        } else {
-               mlx5_flow_namespace_set_peer(ns, NULL, peer_idx);
-               mlx5_flow_namespace_set_peer(peer_ns, NULL, idx);
+               mlx5_flow_namespace_set_peer(ns, NULL, peer_vhca_id);
+               mlx5_flow_namespace_set_peer(peer_ns, NULL, vhca_id);
        }
 
        return 0;
@@ -4196,7 +4195,7 @@ int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
        }
 
        hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
-       MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, 1);
+       MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, enable);
 
        err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport,
                                            MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
index 91dcb0d..aab7059 100644 (file)
@@ -140,7 +140,7 @@ static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace
 
 static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
                                  struct mlx5_flow_root_namespace *peer_ns,
-                                 u8 peer_idx)
+                                 u16 peer_vhca_id)
 {
        return 0;
 }
index b6b9a5a..7790ae5 100644 (file)
@@ -94,7 +94,7 @@ struct mlx5_flow_cmds {
 
        int (*set_peer)(struct mlx5_flow_root_namespace *ns,
                        struct mlx5_flow_root_namespace *peer_ns,
-                       u8 peer_idx);
+                       u16 peer_vhca_id);
 
        int (*create_ns)(struct mlx5_flow_root_namespace *ns);
        int (*destroy_ns)(struct mlx5_flow_root_namespace *ns);
index 4ef04aa..6b069fa 100644 (file)
@@ -889,7 +889,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node  *root,
        struct fs_node *iter = list_entry(start, struct fs_node, list);
        struct mlx5_flow_table *ft = NULL;
 
-       if (!root || root->type == FS_TYPE_PRIO_CHAINS)
+       if (!root)
                return NULL;
 
        list_for_each_advance_continue(iter, &root->children, reverse) {
@@ -905,20 +905,42 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node  *root,
        return ft;
 }
 
-/* If reverse is false then return the first flow table in next priority of
- * prio in the tree, else return the last flow table in the previous priority
- * of prio in the tree.
+static struct fs_node *find_prio_chains_parent(struct fs_node *parent,
+                                              struct fs_node **child)
+{
+       struct fs_node *node = NULL;
+
+       while (parent && parent->type != FS_TYPE_PRIO_CHAINS) {
+               node = parent;
+               parent = parent->parent;
+       }
+
+       if (child)
+               *child = node;
+
+       return parent;
+}
+
+/* If reverse is false then return the first flow table next to the passed node
+ * in the tree, else return the last flow table before the node in the tree.
+ * If skip is true, skip the flow tables in the same prio_chains prio.
  */
-static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
+static struct mlx5_flow_table *find_closest_ft(struct fs_node *node, bool reverse,
+                                              bool skip)
 {
+       struct fs_node *prio_chains_parent = NULL;
        struct mlx5_flow_table *ft = NULL;
        struct fs_node *curr_node;
        struct fs_node *parent;
 
-       parent = prio->node.parent;
-       curr_node = &prio->node;
+       if (skip)
+               prio_chains_parent = find_prio_chains_parent(node, NULL);
+       parent = node->parent;
+       curr_node = node;
        while (!ft && parent) {
-               ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
+               if (parent != prio_chains_parent)
+                       ft = find_closest_ft_recursive(parent, &curr_node->list,
+                                                      reverse);
                curr_node = parent;
                parent = curr_node->parent;
        }
@@ -926,15 +948,15 @@ static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool revers
 }
 
 /* Assuming all the tree is locked by mutex chain lock */
-static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
+static struct mlx5_flow_table *find_next_chained_ft(struct fs_node *node)
 {
-       return find_closest_ft(prio, false);
+       return find_closest_ft(node, false, true);
 }
 
 /* Assuming all the tree is locked by mutex chain lock */
-static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
+static struct mlx5_flow_table *find_prev_chained_ft(struct fs_node *node)
 {
-       return find_closest_ft(prio, true);
+       return find_closest_ft(node, true, true);
 }
 
 static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
@@ -946,7 +968,7 @@ static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
        next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
        fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
 
-       return find_next_chained_ft(prio);
+       return find_next_chained_ft(&prio->node);
 }
 
 static int connect_fts_in_prio(struct mlx5_core_dev *dev,
@@ -970,21 +992,55 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev,
        return 0;
 }
 
+static struct mlx5_flow_table *find_closet_ft_prio_chains(struct fs_node *node,
+                                                         struct fs_node *parent,
+                                                         struct fs_node **child,
+                                                         bool reverse)
+{
+       struct mlx5_flow_table *ft;
+
+       ft = find_closest_ft(node, reverse, false);
+
+       if (ft && parent == find_prio_chains_parent(&ft->node, child))
+               return ft;
+
+       return NULL;
+}
+
 /* Connect flow tables from previous priority of prio to ft */
 static int connect_prev_fts(struct mlx5_core_dev *dev,
                            struct mlx5_flow_table *ft,
                            struct fs_prio *prio)
 {
+       struct fs_node *prio_parent, *parent = NULL, *child, *node;
        struct mlx5_flow_table *prev_ft;
+       int err = 0;
+
+       prio_parent = find_prio_chains_parent(&prio->node, &child);
+
+       /* return directly if not under the first sub ns of prio_chains prio */
+       if (prio_parent && !list_is_first(&child->list, &prio_parent->children))
+               return 0;
 
-       prev_ft = find_prev_chained_ft(prio);
-       if (prev_ft) {
+       prev_ft = find_prev_chained_ft(&prio->node);
+       while (prev_ft) {
                struct fs_prio *prev_prio;
 
                fs_get_obj(prev_prio, prev_ft->node.parent);
-               return connect_fts_in_prio(dev, prev_prio, ft);
+               err = connect_fts_in_prio(dev, prev_prio, ft);
+               if (err)
+                       break;
+
+               if (!parent) {
+                       parent = find_prio_chains_parent(&prev_prio->node, &child);
+                       if (!parent)
+                               break;
+               }
+
+               node = child;
+               prev_ft = find_closet_ft_prio_chains(node, parent, &child, true);
        }
-       return 0;
+       return err;
 }
 
 static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
@@ -1123,7 +1179,7 @@ static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table
                if (err)
                        return err;
 
-               next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
+               next_ft = first_ft ? first_ft : find_next_chained_ft(&prio->node);
                err = connect_fwd_rules(dev, ft, next_ft);
                if (err)
                        return err;
@@ -1198,7 +1254,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
 
        tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
        next_ft = unmanaged ? ft_attr->next_ft :
-                             find_next_chained_ft(fs_prio);
+                             find_next_chained_ft(&fs_prio->node);
        ft->def_miss_action = ns->def_miss_action;
        ft->ns = ns;
        err = root->cmds->create_flow_table(root, ft, ft_attr, next_ft);
@@ -2195,13 +2251,20 @@ EXPORT_SYMBOL(mlx5_del_flow_rules);
 /* Assuming prio->node.children(flow tables) is sorted by level */
 static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
 {
+       struct fs_node *prio_parent, *child;
        struct fs_prio *prio;
 
        fs_get_obj(prio, ft->node.parent);
 
        if (!list_is_last(&ft->node.list, &prio->node.children))
                return list_next_entry(ft, node.list);
-       return find_next_chained_ft(prio);
+
+       prio_parent = find_prio_chains_parent(&prio->node, &child);
+
+       if (prio_parent && list_is_first(&child->list, &prio_parent->children))
+               return find_closest_ft(&prio->node, false, false);
+
+       return find_next_chained_ft(&prio->node);
 }
 
 static int update_root_ft_destroy(struct mlx5_flow_table *ft)
@@ -3621,7 +3684,7 @@ void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
 
 int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
                                 struct mlx5_flow_root_namespace *peer_ns,
-                                u8 peer_idx)
+                                u16 peer_vhca_id)
 {
        if (peer_ns && ns->mode != peer_ns->mode) {
                mlx5_core_err(ns->dev,
@@ -3629,7 +3692,7 @@ int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
                return -EINVAL;
        }
 
-       return ns->cmds->set_peer(ns, peer_ns, peer_idx);
+       return ns->cmds->set_peer(ns, peer_ns, peer_vhca_id);
 }
 
 /* This function should be called only at init stage of the namespace.
index 03e64c4..4aed176 100644 (file)
@@ -303,7 +303,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
 
 int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
                                 struct mlx5_flow_root_namespace *peer_ns,
-                                u8 peer_idx);
+                                u16 peer_vhca_id);
 
 int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
                                 enum mlx5_flow_steering_mode mode);
index db9df97..a80ecb6 100644 (file)
@@ -178,7 +178,7 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
        if (!mlx5_chains_ignore_flow_level_supported(chains) ||
            (chain == 0 && prio == 1 && level == 0)) {
                ft_attr.level = chains->fs_base_level;
-               ft_attr.prio = chains->fs_base_prio;
+               ft_attr.prio = chains->fs_base_prio + prio - 1;
                ns = (chains->ns == MLX5_FLOW_NAMESPACE_FDB) ?
                        mlx5_get_fdb_sub_ns(chains->dev, chain) :
                        mlx5_get_flow_namespace(chains->dev, chains->ns);
index 88dbea6..f42abc2 100644 (file)
@@ -1506,6 +1506,7 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
        if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
                mlx5_core_warn(dev, "%s: interface is down, NOP\n",
                               __func__);
+               mlx5_devlink_params_unregister(priv_to_devlink(dev));
                mlx5_cleanup_once(dev);
                goto out;
        }
index e739ec6..54bb086 100644 (file)
@@ -2079,7 +2079,7 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
 
        peer_vport = vhca_id_valid && mlx5_core_is_pf(dmn->mdev) &&
                (vhca_id != dmn->info.caps.gvmi);
-       vport_dmn = peer_vport ? dmn->peer_dmn[vhca_id] : dmn;
+       vport_dmn = peer_vport ? xa_load(&dmn->peer_dmn_xa, vhca_id) : dmn;
        if (!vport_dmn) {
                mlx5dr_dbg(dmn, "No peer vport domain for given vhca_id\n");
                return NULL;
index 7491911..8c2a34a 100644 (file)
@@ -564,11 +564,12 @@ int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
 
        err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
        if (err)
-               return err;
+               goto err_free_in;
 
        *reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
-       kvfree(in);
 
+err_free_in:
+       kvfree(in);
        return err;
 }
 
index 75dc85d..3d74109 100644 (file)
@@ -475,6 +475,7 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
        mutex_init(&dmn->info.rx.mutex);
        mutex_init(&dmn->info.tx.mutex);
        xa_init(&dmn->definers_xa);
+       xa_init(&dmn->peer_dmn_xa);
 
        if (dr_domain_caps_init(mdev, dmn)) {
                mlx5dr_err(dmn, "Failed init domain, no caps\n");
@@ -507,6 +508,7 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
 uninit_caps:
        dr_domain_caps_uninit(dmn);
 def_xa_destroy:
+       xa_destroy(&dmn->peer_dmn_xa);
        xa_destroy(&dmn->definers_xa);
        kfree(dmn);
        return NULL;
@@ -547,6 +549,7 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
        dr_domain_uninit_csum_recalc_fts(dmn);
        dr_domain_uninit_resources(dmn);
        dr_domain_caps_uninit(dmn);
+       xa_destroy(&dmn->peer_dmn_xa);
        xa_destroy(&dmn->definers_xa);
        mutex_destroy(&dmn->info.tx.mutex);
        mutex_destroy(&dmn->info.rx.mutex);
@@ -556,17 +559,21 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
 
 void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
                            struct mlx5dr_domain *peer_dmn,
-                           u8 peer_idx)
+                           u16 peer_vhca_id)
 {
+       struct mlx5dr_domain *peer;
+
        mlx5dr_domain_lock(dmn);
 
-       if (dmn->peer_dmn[peer_idx])
-               refcount_dec(&dmn->peer_dmn[peer_idx]->refcount);
+       peer = xa_load(&dmn->peer_dmn_xa, peer_vhca_id);
+       if (peer)
+               refcount_dec(&peer->refcount);
 
-       dmn->peer_dmn[peer_idx] = peer_dmn;
+       WARN_ON(xa_err(xa_store(&dmn->peer_dmn_xa, peer_vhca_id, peer_dmn, GFP_KERNEL)));
 
-       if (dmn->peer_dmn[peer_idx])
-               refcount_inc(&dmn->peer_dmn[peer_idx]->refcount);
+       peer = xa_load(&dmn->peer_dmn_xa, peer_vhca_id);
+       if (peer)
+               refcount_inc(&peer->refcount);
 
        mlx5dr_domain_unlock(dmn);
 }
index 69d7a8f..f708b02 100644 (file)
@@ -1652,17 +1652,18 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
        struct mlx5dr_domain *dmn = sb->dmn;
        struct mlx5dr_domain *vport_dmn;
        u8 *bit_mask = sb->bit_mask;
+       struct mlx5dr_domain *peer;
        bool source_gvmi_set;
 
        DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
 
        if (sb->vhca_id_valid) {
+               peer = xa_load(&dmn->peer_dmn_xa, id);
                /* Find port GVMI based on the eswitch_owner_vhca_id */
                if (id == dmn->info.caps.gvmi)
                        vport_dmn = dmn;
-               else if (id < MLX5_MAX_PORTS && dmn->peer_dmn[id] &&
-                        (id == dmn->peer_dmn[id]->info.caps.gvmi))
-                       vport_dmn = dmn->peer_dmn[id];
+               else if (peer && (id == peer->info.caps.gvmi))
+                       vport_dmn = peer;
                else
                        return -EINVAL;
 
index f4ef0b2..dd856cd 100644 (file)
@@ -1984,16 +1984,17 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
        struct mlx5dr_domain *dmn = sb->dmn;
        struct mlx5dr_domain *vport_dmn;
        u8 *bit_mask = sb->bit_mask;
+       struct mlx5dr_domain *peer;
 
        DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn);
 
        if (sb->vhca_id_valid) {
+               peer = xa_load(&dmn->peer_dmn_xa, id);
                /* Find port GVMI based on the eswitch_owner_vhca_id */
                if (id == dmn->info.caps.gvmi)
                        vport_dmn = dmn;
-               else if (id < MLX5_MAX_PORTS && dmn->peer_dmn[id] &&
-                        (id == dmn->peer_dmn[id]->info.caps.gvmi))
-                       vport_dmn = dmn->peer_dmn[id];
+               else if (peer && (id == peer->info.caps.gvmi))
+                       vport_dmn = peer;
                else
                        return -EINVAL;
 
index 1622dbb..6c59de3 100644 (file)
@@ -935,7 +935,6 @@ struct mlx5dr_domain_info {
 };
 
 struct mlx5dr_domain {
-       struct mlx5dr_domain *peer_dmn[MLX5_MAX_PORTS];
        struct mlx5_core_dev *mdev;
        u32 pdn;
        struct mlx5_uars_page *uar;
@@ -956,6 +955,7 @@ struct mlx5dr_domain {
        struct list_head dbg_tbl_list;
        struct mlx5dr_dbg_dump_info dump_info;
        struct xarray definers_xa;
+       struct xarray peer_dmn_xa;
        /* memory management statistics */
        u32 num_buddies[DR_ICM_TYPE_MAX];
 };
index 6aac5f0..feb307f 100644 (file)
@@ -781,14 +781,14 @@ restore_fte:
 
 static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns,
                                struct mlx5_flow_root_namespace *peer_ns,
-                               u8 peer_idx)
+                               u16 peer_vhca_id)
 {
        struct mlx5dr_domain *peer_domain = NULL;
 
        if (peer_ns)
                peer_domain = peer_ns->fs_dr_domain.dr_domain;
        mlx5dr_domain_set_peer(ns->fs_dr_domain.dr_domain,
-                              peer_domain, peer_idx);
+                              peer_domain, peer_vhca_id);
        return 0;
 }
 
index 24cbb33..89fced8 100644 (file)
@@ -49,7 +49,7 @@ int mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags);
 
 void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
                            struct mlx5dr_domain *peer_dmn,
-                           u8 peer_idx);
+                           u16 peer_vhca_id);
 
 struct mlx5dr_table *
 mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags,
index f868235..94d4f94 100644 (file)
@@ -194,6 +194,22 @@ void qed_hw_remove(struct qed_dev *cdev);
 struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
 
 /**
+ * qed_ptt_acquire_context(): Allocate a PTT window honoring the context
+ *                           atomicity.
+ *
+ * @p_hwfn: HW device data.
+ * @is_atomic: Hint from the caller - if the func can sleep or not.
+ *
+ * Context: The function should not sleep in case is_atomic == true.
+ * Return: struct qed_ptt.
+ *
+ * Should be called at the entry point to the driver
+ * (at the beginning of an exported function).
+ */
+struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn,
+                                       bool is_atomic);
+
+/**
  * qed_ptt_release(): Release PTT Window.
  *
  * @p_hwfn: HW device data.
index 3764190..04602ac 100644 (file)
@@ -693,13 +693,14 @@ static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn,
 }
 
 static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn,
-                             struct qed_fcoe_stats *p_stats)
+                             struct qed_fcoe_stats *p_stats,
+                             bool is_atomic)
 {
        struct qed_ptt *p_ptt;
 
        memset(p_stats, 0, sizeof(*p_stats));
 
-       p_ptt = qed_ptt_acquire(p_hwfn);
+       p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic);
 
        if (!p_ptt) {
                DP_ERR(p_hwfn, "Failed to acquire ptt\n");
@@ -973,19 +974,27 @@ static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
                                        QED_SPQ_MODE_EBLOCK, NULL);
 }
 
+static int qed_fcoe_stats_context(struct qed_dev *cdev,
+                                 struct qed_fcoe_stats *stats,
+                                 bool is_atomic)
+{
+       return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic);
+}
+
 static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
 {
-       return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats);
+       return qed_fcoe_stats_context(cdev, stats, false);
 }
 
 void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
-                                struct qed_mcp_fcoe_stats *stats)
+                                struct qed_mcp_fcoe_stats *stats,
+                                bool is_atomic)
 {
        struct qed_fcoe_stats proto_stats;
 
        /* Retrieve FW statistics */
        memset(&proto_stats, 0, sizeof(proto_stats));
-       if (qed_fcoe_stats(cdev, &proto_stats)) {
+       if (qed_fcoe_stats_context(cdev, &proto_stats, is_atomic)) {
                DP_VERBOSE(cdev, QED_MSG_STORAGE,
                           "Failed to collect FCoE statistics\n");
                return;
index 19c85ad..214e829 100644 (file)
@@ -28,8 +28,20 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn);
 void qed_fcoe_setup(struct qed_hwfn *p_hwfn);
 
 void qed_fcoe_free(struct qed_hwfn *p_hwfn);
+/**
+ * qed_get_protocol_stats_fcoe(): Fills provided statistics
+ *                               struct with statistics.
+ *
+ * @cdev: Qed dev pointer.
+ * @stats: Points to struct that will be filled with statistics.
+ * @is_atomic: Hint from the caller - if the func can sleep or not.
+ *
+ * Context: The function should not sleep in case is_atomic == true.
+ * Return: Void.
+ */
 void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
-                                struct qed_mcp_fcoe_stats *stats);
+                                struct qed_mcp_fcoe_stats *stats,
+                                bool is_atomic);
 #else /* CONFIG_QED_FCOE */
 static inline int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
 {
@@ -40,7 +52,8 @@ static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn) {}
 static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn) {}
 
 static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
-                                              struct qed_mcp_fcoe_stats *stats)
+                                              struct qed_mcp_fcoe_stats *stats,
+                                              bool is_atomic)
 {
 }
 #endif /* CONFIG_QED_FCOE */
index 554f30b..6263f84 100644 (file)
 #include "qed_reg_addr.h"
 #include "qed_sriov.h"
 
-#define QED_BAR_ACQUIRE_TIMEOUT 1000
+#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT     1000
+#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP         1000
+#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT     100000
+#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY         10
 
 /* Invalid values */
 #define QED_BAR_INVALID_OFFSET          (cpu_to_le32(-1))
@@ -85,11 +88,21 @@ void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
 
 struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
 {
+       return qed_ptt_acquire_context(p_hwfn, false);
+}
+
+struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn, bool is_atomic)
+{
        struct qed_ptt *p_ptt;
-       unsigned int i;
+       unsigned int i, count;
+
+       if (is_atomic)
+               count = QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT;
+       else
+               count = QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT;
 
        /* Take the free PTT from the list */
-       for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
+       for (i = 0; i < count; i++) {
                spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
 
                if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
@@ -105,7 +118,12 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
                }
 
                spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
-               usleep_range(1000, 2000);
+
+               if (is_atomic)
+                       udelay(QED_BAR_ACQUIRE_TIMEOUT_UDELAY);
+               else
+                       usleep_range(QED_BAR_ACQUIRE_TIMEOUT_USLEEP,
+                                    QED_BAR_ACQUIRE_TIMEOUT_USLEEP * 2);
        }
 
        DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
index 511ab21..980e728 100644 (file)
@@ -999,13 +999,14 @@ static void _qed_iscsi_get_pstats(struct qed_hwfn *p_hwfn,
 }
 
 static int qed_iscsi_get_stats(struct qed_hwfn *p_hwfn,
-                              struct qed_iscsi_stats *stats)
+                              struct qed_iscsi_stats *stats,
+                              bool is_atomic)
 {
        struct qed_ptt *p_ptt;
 
        memset(stats, 0, sizeof(*stats));
 
-       p_ptt = qed_ptt_acquire(p_hwfn);
+       p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic);
        if (!p_ptt) {
                DP_ERR(p_hwfn, "Failed to acquire ptt\n");
                return -EAGAIN;
@@ -1336,9 +1337,16 @@ static int qed_iscsi_destroy_conn(struct qed_dev *cdev,
                                           QED_SPQ_MODE_EBLOCK, NULL);
 }
 
+static int qed_iscsi_stats_context(struct qed_dev *cdev,
+                                  struct qed_iscsi_stats *stats,
+                                  bool is_atomic)
+{
+       return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic);
+}
+
 static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats)
 {
-       return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats);
+       return qed_iscsi_stats_context(cdev, stats, false);
 }
 
 static int qed_iscsi_change_mac(struct qed_dev *cdev,
@@ -1358,13 +1366,14 @@ static int qed_iscsi_change_mac(struct qed_dev *cdev,
 }
 
 void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
-                                 struct qed_mcp_iscsi_stats *stats)
+                                 struct qed_mcp_iscsi_stats *stats,
+                                 bool is_atomic)
 {
        struct qed_iscsi_stats proto_stats;
 
        /* Retrieve FW statistics */
        memset(&proto_stats, 0, sizeof(proto_stats));
-       if (qed_iscsi_stats(cdev, &proto_stats)) {
+       if (qed_iscsi_stats_context(cdev, &proto_stats, is_atomic)) {
                DP_VERBOSE(cdev, QED_MSG_STORAGE,
                           "Failed to collect ISCSI statistics\n");
                return;
index dec2b00..974cb8d 100644 (file)
@@ -39,11 +39,14 @@ void qed_iscsi_free(struct qed_hwfn *p_hwfn);
  *
  * @cdev: Qed dev pointer.
  * @stats: Points to struct that will be filled with statistics.
+ * @is_atomic: Hint from the caller - if the func can sleep or not.
  *
+ * Context: The function should not sleep in case is_atomic == true.
  * Return: Void.
  */
 void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
-                                 struct qed_mcp_iscsi_stats *stats);
+                                 struct qed_mcp_iscsi_stats *stats,
+                                 bool is_atomic);
 #else /* IS_ENABLED(CONFIG_QED_ISCSI) */
 static inline int qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
 {
@@ -56,7 +59,8 @@ static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn) {}
 
 static inline void
 qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
-                            struct qed_mcp_iscsi_stats *stats) {}
+                            struct qed_mcp_iscsi_stats *stats,
+                            bool is_atomic) {}
 #endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
 
 #endif
index 7776d3b..970b9aa 100644 (file)
@@ -1863,7 +1863,8 @@ static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
 }
 
 static void _qed_get_vport_stats(struct qed_dev *cdev,
-                                struct qed_eth_stats *stats)
+                                struct qed_eth_stats *stats,
+                                bool is_atomic)
 {
        u8 fw_vport = 0;
        int i;
@@ -1872,10 +1873,11 @@ static void _qed_get_vport_stats(struct qed_dev *cdev,
 
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
-               struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
-                                                   :  NULL;
+               struct qed_ptt *p_ptt;
                bool b_get_port_stats;
 
+               p_ptt = IS_PF(cdev) ? qed_ptt_acquire_context(p_hwfn, is_atomic)
+                                   : NULL;
                if (IS_PF(cdev)) {
                        /* The main vport index is relative first */
                        if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
@@ -1901,6 +1903,13 @@ out:
 
 void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
 {
+       qed_get_vport_stats_context(cdev, stats, false);
+}
+
+void qed_get_vport_stats_context(struct qed_dev *cdev,
+                                struct qed_eth_stats *stats,
+                                bool is_atomic)
+{
        u32 i;
 
        if (!cdev || cdev->recov_in_prog) {
@@ -1908,7 +1917,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
                return;
        }
 
-       _qed_get_vport_stats(cdev, stats);
+       _qed_get_vport_stats(cdev, stats, is_atomic);
 
        if (!cdev->reset_stats)
                return;
@@ -1960,7 +1969,7 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
        if (!cdev->reset_stats) {
                DP_INFO(cdev, "Reset stats not allocated\n");
        } else {
-               _qed_get_vport_stats(cdev, cdev->reset_stats);
+               _qed_get_vport_stats(cdev, cdev->reset_stats, false);
                cdev->reset_stats->common.link_change_count = 0;
        }
 }
index a538cf4..2d2f82c 100644 (file)
@@ -249,8 +249,32 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
                            enum spq_mode comp_mode,
                            struct qed_spq_comp_cb *p_comp_data);
 
+/**
+ * qed_get_vport_stats(): Fills provided statistics
+ *                       struct with statistics.
+ *
+ * @cdev: Qed dev pointer.
+ * @stats: Points to struct that will be filled with statistics.
+ *
+ * Return: Void.
+ */
 void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
 
+/**
+ * qed_get_vport_stats_context(): Fills provided statistics
+ *                               struct with statistics.
+ *
+ * @cdev: Qed dev pointer.
+ * @stats: Points to struct that will be filled with statistics.
+ * @is_atomic: Hint from the caller - if the func can sleep or not.
+ *
+ * Context: The function should not sleep in case is_atomic == true.
+ * Return: Void.
+ */
+void qed_get_vport_stats_context(struct qed_dev *cdev,
+                                struct qed_eth_stats *stats,
+                                bool is_atomic);
+
 void qed_reset_vport_stats(struct qed_dev *cdev);
 
 /**
index f5af833..c278f88 100644 (file)
@@ -3092,7 +3092,7 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
 
        switch (type) {
        case QED_MCP_LAN_STATS:
-               qed_get_vport_stats(cdev, &eth_stats);
+               qed_get_vport_stats_context(cdev, &eth_stats, true);
                stats->lan_stats.ucast_rx_pkts =
                                        eth_stats.common.rx_ucast_pkts;
                stats->lan_stats.ucast_tx_pkts =
@@ -3100,10 +3100,10 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
                stats->lan_stats.fcs_err = -1;
                break;
        case QED_MCP_FCOE_STATS:
-               qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
+               qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats, true);
                break;
        case QED_MCP_ISCSI_STATS:
-               qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
+               qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats, true);
                break;
        default:
                DP_VERBOSE(cdev, QED_MSG_SP,
index 9e5ce2a..cf1d67b 100644 (file)
  */
 struct ef4_loopback_payload {
        char pad[2]; /* Ensures ip is 4-byte aligned */
-       struct ethhdr header;
-       struct iphdr ip;
-       struct udphdr udp;
-       __be16 iteration;
-       char msg[64];
+       struct_group_attr(packet, __packed,
+               struct ethhdr header;
+               struct iphdr ip;
+               struct udphdr udp;
+               __be16 iteration;
+               char msg[64];
+       );
 } __packed __aligned(4);
-#define EF4_LOOPBACK_PAYLOAD_LEN       (sizeof(struct ef4_loopback_payload) - \
-                                        offsetof(struct ef4_loopback_payload, \
-                                                 header))
+#define EF4_LOOPBACK_PAYLOAD_LEN       \
+               sizeof_field(struct ef4_loopback_payload, packet)
 
 /* Loopback test source MAC address */
 static const u8 payload_source[ETH_ALEN] __aligned(2) = {
@@ -299,7 +300,7 @@ void ef4_loopback_rx_packet(struct ef4_nic *efx,
 
        payload = &state->payload;
 
-       memcpy(&received.header, buf_ptr,
+       memcpy(&received.packet, buf_ptr,
               min_t(int, pkt_len, EF4_LOOPBACK_PAYLOAD_LEN));
        received.ip.saddr = payload->ip.saddr;
        if (state->offload_csum)
@@ -370,7 +371,7 @@ void ef4_loopback_rx_packet(struct ef4_nic *efx,
                               buf_ptr, pkt_len, 0);
                netif_err(efx, drv, efx->net_dev, "expected packet:\n");
                print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
-                              &state->payload.header, EF4_LOOPBACK_PAYLOAD_LEN,
+                              &state->payload.packet, EF4_LOOPBACK_PAYLOAD_LEN,
                               0);
        }
 #endif
@@ -440,6 +441,8 @@ static int ef4_begin_loopback(struct ef4_tx_queue *tx_queue)
                payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
                /* Strip off the leading padding */
                skb_pull(skb, offsetof(struct ef4_loopback_payload, header));
+               /* Strip off the trailing padding */
+               skb_trim(skb, EF4_LOOPBACK_PAYLOAD_LEN);
 
                /* Ensure everything we've written is visible to the
                 * interrupt handler. */
index 96d856b..19a0b85 100644 (file)
  */
 struct efx_loopback_payload {
        char pad[2]; /* Ensures ip is 4-byte aligned */
-       struct ethhdr header;
-       struct iphdr ip;
-       struct udphdr udp;
-       __be16 iteration;
-       char msg[64];
+       struct_group_attr(packet, __packed,
+               struct ethhdr header;
+               struct iphdr ip;
+               struct udphdr udp;
+               __be16 iteration;
+               char msg[64];
+       );
 } __packed __aligned(4);
-#define EFX_LOOPBACK_PAYLOAD_LEN       (sizeof(struct efx_loopback_payload) - \
-                                        offsetof(struct efx_loopback_payload, \
-                                                 header))
+#define EFX_LOOPBACK_PAYLOAD_LEN       \
+               sizeof_field(struct efx_loopback_payload, packet)
 
 /* Loopback test source MAC address */
 static const u8 payload_source[ETH_ALEN] __aligned(2) = {
@@ -297,7 +298,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
 
        payload = &state->payload;
 
-       memcpy(&received.header, buf_ptr,
+       memcpy(&received.packet, buf_ptr,
               min_t(int, pkt_len, EFX_LOOPBACK_PAYLOAD_LEN));
        received.ip.saddr = payload->ip.saddr;
        if (state->offload_csum)
@@ -368,7 +369,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
                               buf_ptr, pkt_len, 0);
                netif_err(efx, drv, efx->net_dev, "expected packet:\n");
                print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
-                              &state->payload.header, EFX_LOOPBACK_PAYLOAD_LEN,
+                              &state->payload.packet, EFX_LOOPBACK_PAYLOAD_LEN,
                               0);
        }
 #endif
@@ -438,6 +439,8 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
                payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
                /* Strip off the leading padding */
                skb_pull(skb, offsetof(struct efx_loopback_payload, header));
+               /* Strip off the trailing padding */
+               skb_trim(skb, EFX_LOOPBACK_PAYLOAD_LEN);
 
                /* Ensure everything we've written is visible to the
                 * interrupt handler. */
index 111ac17..b55fd33 100644 (file)
  */
 struct efx_loopback_payload {
        char pad[2]; /* Ensures ip is 4-byte aligned */
-       struct ethhdr header;
-       struct iphdr ip;
-       struct udphdr udp;
-       __be16 iteration;
-       char msg[64];
+       struct_group_attr(packet, __packed,
+               struct ethhdr header;
+               struct iphdr ip;
+               struct udphdr udp;
+               __be16 iteration;
+               char msg[64];
+       );
 } __packed __aligned(4);
-#define EFX_LOOPBACK_PAYLOAD_LEN       (sizeof(struct efx_loopback_payload) - \
-                                        offsetof(struct efx_loopback_payload, \
-                                                 header))
+#define EFX_LOOPBACK_PAYLOAD_LEN       \
+               sizeof_field(struct efx_loopback_payload, packet)
 
 /* Loopback test source MAC address */
 static const u8 payload_source[ETH_ALEN] __aligned(2) = {
@@ -297,7 +298,7 @@ void efx_siena_loopback_rx_packet(struct efx_nic *efx,
 
        payload = &state->payload;
 
-       memcpy(&received.header, buf_ptr,
+       memcpy(&received.packet, buf_ptr,
               min_t(int, pkt_len, EFX_LOOPBACK_PAYLOAD_LEN));
        received.ip.saddr = payload->ip.saddr;
        if (state->offload_csum)
@@ -368,7 +369,7 @@ void efx_siena_loopback_rx_packet(struct efx_nic *efx,
                               buf_ptr, pkt_len, 0);
                netif_err(efx, drv, efx->net_dev, "expected packet:\n");
                print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
-                              &state->payload.header, EFX_LOOPBACK_PAYLOAD_LEN,
+                              &state->payload.packet, EFX_LOOPBACK_PAYLOAD_LEN,
                               0);
        }
 #endif
@@ -438,6 +439,8 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
                payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
                /* Strip off the leading padding */
                skb_pull(skb, offsetof(struct efx_loopback_payload, header));
+               /* Strip off the trailing padding */
+               skb_trim(skb, EFX_LOOPBACK_PAYLOAD_LEN);
 
                /* Ensure everything we've written is visible to the
                 * interrupt handler. */
index 2d7347b..0dcd6a5 100644 (file)
@@ -1851,6 +1851,17 @@ static int netsec_of_probe(struct platform_device *pdev,
                return err;
        }
 
+       /*
+        * SynQuacer is physically configured with TX and RX delays
+        * but the standard firmware claimed otherwise for a long
+        * time, ignore it.
+        */
+       if (of_machine_is_compatible("socionext,developer-box") &&
+           priv->phy_interface != PHY_INTERFACE_MODE_RGMII_ID) {
+               dev_warn(&pdev->dev, "Outdated firmware reports incorrect PHY mode, overriding\n");
+               priv->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
+       }
+
        priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
        if (!priv->phy_np) {
                dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
index f8367c5..fbb0ccf 100644 (file)
@@ -234,7 +234,8 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
        res.addr = mgbe->regs;
        res.irq = irq;
 
-       mgbe->clks = devm_kzalloc(&pdev->dev, sizeof(*mgbe->clks), GFP_KERNEL);
+       mgbe->clks = devm_kcalloc(&pdev->dev, ARRAY_SIZE(mgbe_clks),
+                                 sizeof(*mgbe->clks), GFP_KERNEL);
        if (!mgbe->clks)
                return -ENOMEM;
 
index e0ac1bc..49f3033 100644 (file)
@@ -1567,12 +1567,16 @@ static int temac_probe(struct platform_device *pdev)
        }
 
        /* Error handle returned DMA RX and TX interrupts */
-       if (lp->rx_irq < 0)
-               return dev_err_probe(&pdev->dev, lp->rx_irq,
+       if (lp->rx_irq <= 0) {
+               rc = lp->rx_irq ?: -EINVAL;
+               return dev_err_probe(&pdev->dev, rc,
                                     "could not get DMA RX irq\n");
-       if (lp->tx_irq < 0)
-               return dev_err_probe(&pdev->dev, lp->tx_irq,
+       }
+       if (lp->tx_irq <= 0) {
+               rc = lp->tx_irq ?: -EINVAL;
+               return dev_err_probe(&pdev->dev, rc,
                                     "could not get DMA TX irq\n");
+       }
 
        if (temac_np) {
                /* Retrieve the MAC address */
index 9137fb8..49d1d6a 100644 (file)
@@ -534,7 +534,7 @@ static int tap_open(struct inode *inode, struct file *file)
        q->sock.state = SS_CONNECTED;
        q->sock.file = file;
        q->sock.ops = &tap_socket_ops;
-       sock_init_data_uid(&q->sock, &q->sk, inode->i_uid);
+       sock_init_data_uid(&q->sock, &q->sk, current_fsuid());
        q->sk.sk_write_space = tap_sock_write_space;
        q->sk.sk_destruct = tap_sock_destruct;
        q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
index d75456a..25f0191 100644 (file)
@@ -3469,7 +3469,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
        tfile->socket.file = file;
        tfile->socket.ops = &tun_socket_ops;
 
-       sock_init_data_uid(&tfile->socket, &tfile->sk, inode->i_uid);
+       sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid());
 
        tfile->sk.sk_write_space = tun_sock_write_space;
        tfile->sk.sk_sndbuf = INT_MAX;
index c00a89b..6d61052 100644 (file)
@@ -618,6 +618,13 @@ static const struct usb_device_id  products[] = {
        .match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
                          | USB_DEVICE_ID_MATCH_DEVICE,
        .idVendor               = 0x04DD,
+       .idProduct              = 0x8005,   /* A-300 */
+       ZAURUS_FAKE_INTERFACE,
+       .driver_info        = 0,
+}, {
+       .match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
+                         | USB_DEVICE_ID_MATCH_DEVICE,
+       .idVendor               = 0x04DD,
        .idProduct              = 0x8006,       /* B-500/SL-5600 */
        ZAURUS_MASTER_INTERFACE,
        .driver_info            = 0,
@@ -625,11 +632,25 @@ static const struct usb_device_id products[] = {
        .match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
                          | USB_DEVICE_ID_MATCH_DEVICE,
        .idVendor               = 0x04DD,
+       .idProduct              = 0x8006,   /* B-500/SL-5600 */
+       ZAURUS_FAKE_INTERFACE,
+       .driver_info        = 0,
+}, {
+       .match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
+                         | USB_DEVICE_ID_MATCH_DEVICE,
+       .idVendor               = 0x04DD,
        .idProduct              = 0x8007,       /* C-700 */
        ZAURUS_MASTER_INTERFACE,
        .driver_info            = 0,
 }, {
        .match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
+                         | USB_DEVICE_ID_MATCH_DEVICE,
+       .idVendor               = 0x04DD,
+       .idProduct              = 0x8007,   /* C-700 */
+       ZAURUS_FAKE_INTERFACE,
+       .driver_info        = 0,
+}, {
+       .match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
                 | USB_DEVICE_ID_MATCH_DEVICE,
        .idVendor               = 0x04DD,
        .idProduct              = 0x9031,       /* C-750 C-760 */
index c458c03..59cde06 100644 (file)
@@ -4224,8 +4224,6 @@ static void lan78xx_disconnect(struct usb_interface *intf)
        if (!dev)
                return;
 
-       set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
-
        netif_napi_del(&dev->napi);
 
        udev = interface_to_usbdev(intf);
@@ -4233,6 +4231,8 @@ static void lan78xx_disconnect(struct usb_interface *intf)
 
        unregister_netdev(net);
 
+       timer_shutdown_sync(&dev->stat_monitor);
+       set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
        cancel_delayed_work_sync(&dev->wq);
 
        phydev = net->phydev;
@@ -4247,9 +4247,6 @@ static void lan78xx_disconnect(struct usb_interface *intf)
 
        usb_scuttle_anchored_urbs(&dev->deferred);
 
-       if (timer_pending(&dev->stat_monitor))
-               del_timer_sync(&dev->stat_monitor);
-
        lan78xx_unbind(dev, intf);
 
        lan78xx_free_tx_resources(dev);
index 417f7ea..344af3c 100644 (file)
@@ -1423,6 +1423,7 @@ static const struct usb_device_id products[] = {
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */
        {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},    /* Quectel BG96 */
+       {QMI_QUIRK_SET_DTR(0x2c7c, 0x030e, 4)}, /* Quectel EM05GV2 */
        {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
        {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)},    /* Foxconn T77W968 LTE */
        {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)},    /* Foxconn T77W968 LTE with eSIM support*/
index 7984f21..df3617c 100644 (file)
@@ -289,11 +289,25 @@ static const struct usb_device_id products [] = {
        .match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
                          | USB_DEVICE_ID_MATCH_DEVICE,
        .idVendor               = 0x04DD,
+       .idProduct              = 0x8005,       /* A-300 */
+       ZAURUS_FAKE_INTERFACE,
+       .driver_info = (unsigned long)&bogus_mdlm_info,
+}, {
+       .match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
+                         | USB_DEVICE_ID_MATCH_DEVICE,
+       .idVendor               = 0x04DD,
        .idProduct              = 0x8006,       /* B-500/SL-5600 */
        ZAURUS_MASTER_INTERFACE,
        .driver_info = ZAURUS_PXA_INFO,
 }, {
        .match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
+                         | USB_DEVICE_ID_MATCH_DEVICE,
+       .idVendor               = 0x04DD,
+       .idProduct              = 0x8006,       /* B-500/SL-5600 */
+       ZAURUS_FAKE_INTERFACE,
+       .driver_info = (unsigned long)&bogus_mdlm_info,
+}, {
+       .match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
                  | USB_DEVICE_ID_MATCH_DEVICE,
        .idVendor               = 0x04DD,
        .idProduct              = 0x8007,       /* C-700 */
@@ -301,6 +315,13 @@ static const struct usb_device_id  products [] = {
        .driver_info = ZAURUS_PXA_INFO,
 }, {
        .match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
+                         | USB_DEVICE_ID_MATCH_DEVICE,
+       .idVendor               = 0x04DD,
+       .idProduct              = 0x8007,       /* C-700 */
+       ZAURUS_FAKE_INTERFACE,
+       .driver_info = (unsigned long)&bogus_mdlm_info,
+}, {
+       .match_flags    =   USB_DEVICE_ID_MATCH_INT_INFO
                 | USB_DEVICE_ID_MATCH_DEVICE,
        .idVendor               = 0x04DD,
        .idProduct              = 0x9031,       /* C-750 C-760 */
index 1cebba7..139da57 100644 (file)
@@ -376,7 +376,6 @@ static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
                struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
 
                if (!irq_grp->napi_enabled) {
-                       dev_set_threaded(&irq_grp->napi_ndev, true);
                        napi_enable(&irq_grp->napi);
                        irq_grp->napi_enabled = true;
                }
index c899616..c630836 100644 (file)
@@ -466,7 +466,6 @@ void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
                struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
 
                if (!irq_grp->napi_enabled) {
-                       dev_set_threaded(&irq_grp->napi_ndev, true);
                        napi_enable(&irq_grp->napi);
                        irq_grp->napi_enabled = true;
                }
index a75bfa9..dc2b3b4 100644 (file)
@@ -36,11 +36,6 @@ ath6kl_core-y += wmi.o
 ath6kl_core-y += core.o
 ath6kl_core-y += recovery.o
 
-# FIXME: temporarily silence -Wdangling-pointer on non W=1+ builds
-ifndef KBUILD_EXTRA_WARN
-CFLAGS_htc_mbox.o += $(call cc-disable-warning, dangling-pointer)
-endif
-
 ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
 ath6kl_core-$(CONFIG_ATH6KL_TRACING) += trace.o
 
index 792adaf..bece267 100644 (file)
@@ -398,7 +398,12 @@ struct brcmf_scan_params_le {
                                 * fixed parameter portion is assumed, otherwise
                                 * ssid in the fixed portion is ignored
                                 */
-       __le16 channel_list[1]; /* list of chanspecs */
+       union {
+               __le16 padding; /* Reserve space for at least 1 entry for abort
+                                * which uses an on stack brcmf_scan_params_le
+                                */
+               DECLARE_FLEX_ARRAY(__le16, channel_list);       /* chanspecs */
+       };
 };
 
 struct brcmf_scan_params_v2_le {
index 2b0f332..1f3bde8 100644 (file)
@@ -577,7 +577,7 @@ struct tx_msg {
     struct tib_structure tib;
     struct phy_header phy;
     struct mac_header mac;
-    UCHAR  var[1];
+    UCHAR  var[];
 };
 
 /****** ECF Receive Control Structure (RCS) Area at Shared RAM offset 0x0800  */
index 68e8822..ccedea7 100644 (file)
@@ -128,12 +128,12 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev)
        case MT_EE_5GHZ:
                dev->mphy.cap.has_5ghz = true;
                break;
-       case MT_EE_2GHZ:
-               dev->mphy.cap.has_2ghz = true;
-               break;
        case MT_EE_DBDC:
                dev->dbdc_support = true;
                fallthrough;
+       case MT_EE_2GHZ:
+               dev->mphy.cap.has_2ghz = true;
+               break;
        default:
                dev->mphy.cap.has_2ghz = true;
                dev->mphy.cap.has_5ghz = true;
index 4e646e5..8fac57b 100644 (file)
@@ -818,7 +818,7 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
                return -EINVAL;
 
        ra.reg = rd->regs[rpi->id];
-       if (!ra.reg)
+       if (!ra.reg.val)
                return -EINVAL;
 
        /* non-hardware data are collected by the polling thread */
@@ -830,7 +830,7 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
        ra.mask = rpi->mask;
 
        if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) {
-               pr_debug("failed to read reg 0x%llx for %s:%s\n", ra.reg, rd->rp->name, rd->name);
+               pr_debug("failed to read reg 0x%llx for %s:%s\n", ra.reg.val, rd->rp->name, rd->name);
                return -EIO;
        }
 
@@ -920,7 +920,7 @@ static int rapl_check_unit_core(struct rapl_domain *rd)
        ra.mask = ~0;
        if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) {
                pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n",
-                       ra.reg, rd->rp->name, rd->name);
+                       ra.reg.val, rd->rp->name, rd->name);
                return -ENODEV;
        }
 
@@ -948,7 +948,7 @@ static int rapl_check_unit_atom(struct rapl_domain *rd)
        ra.mask = ~0;
        if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) {
                pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n",
-                       ra.reg, rd->rp->name, rd->name);
+                       ra.reg.val, rd->rp->name, rd->name);
                return -ENODEV;
        }
 
@@ -1135,7 +1135,7 @@ static int rapl_check_unit_tpmi(struct rapl_domain *rd)
        ra.mask = ~0;
        if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) {
                pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n",
-                       ra.reg, rd->rp->name, rd->name);
+                       ra.reg.val, rd->rp->name, rd->name);
                return -ENODEV;
        }
 
@@ -1411,8 +1411,8 @@ static int rapl_get_domain_unit(struct rapl_domain *rd)
        struct rapl_defaults *defaults = get_defaults(rd->rp);
        int ret;
 
-       if (!rd->regs[RAPL_DOMAIN_REG_UNIT]) {
-               if (!rd->rp->priv->reg_unit) {
+       if (!rd->regs[RAPL_DOMAIN_REG_UNIT].val) {
+               if (!rd->rp->priv->reg_unit.val) {
                        pr_err("No valid Unit register found\n");
                        return -ENODEV;
                }
index 569e25e..dd47102 100644 (file)
@@ -34,28 +34,32 @@ static struct rapl_if_priv *rapl_msr_priv;
 
 static struct rapl_if_priv rapl_msr_priv_intel = {
        .type = RAPL_IF_MSR,
-       .reg_unit = MSR_RAPL_POWER_UNIT,
-       .regs[RAPL_DOMAIN_PACKAGE] = {
-               MSR_PKG_POWER_LIMIT, MSR_PKG_ENERGY_STATUS, MSR_PKG_PERF_STATUS, 0, MSR_PKG_POWER_INFO },
-       .regs[RAPL_DOMAIN_PP0] = {
-               MSR_PP0_POWER_LIMIT, MSR_PP0_ENERGY_STATUS, 0, MSR_PP0_POLICY, 0 },
-       .regs[RAPL_DOMAIN_PP1] = {
-               MSR_PP1_POWER_LIMIT, MSR_PP1_ENERGY_STATUS, 0, MSR_PP1_POLICY, 0 },
-       .regs[RAPL_DOMAIN_DRAM] = {
-               MSR_DRAM_POWER_LIMIT, MSR_DRAM_ENERGY_STATUS, MSR_DRAM_PERF_STATUS, 0, MSR_DRAM_POWER_INFO },
-       .regs[RAPL_DOMAIN_PLATFORM] = {
-               MSR_PLATFORM_POWER_LIMIT, MSR_PLATFORM_ENERGY_STATUS, 0, 0, 0},
+       .reg_unit.msr = MSR_RAPL_POWER_UNIT,
+       .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_LIMIT].msr   = MSR_PKG_POWER_LIMIT,
+       .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_STATUS].msr  = MSR_PKG_ENERGY_STATUS,
+       .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PERF].msr    = MSR_PKG_PERF_STATUS,
+       .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_INFO].msr    = MSR_PKG_POWER_INFO,
+       .regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_LIMIT].msr       = MSR_PP0_POWER_LIMIT,
+       .regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_STATUS].msr      = MSR_PP0_ENERGY_STATUS,
+       .regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_POLICY].msr      = MSR_PP0_POLICY,
+       .regs[RAPL_DOMAIN_PP1][RAPL_DOMAIN_REG_LIMIT].msr       = MSR_PP1_POWER_LIMIT,
+       .regs[RAPL_DOMAIN_PP1][RAPL_DOMAIN_REG_STATUS].msr      = MSR_PP1_ENERGY_STATUS,
+       .regs[RAPL_DOMAIN_PP1][RAPL_DOMAIN_REG_POLICY].msr      = MSR_PP1_POLICY,
+       .regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_LIMIT].msr      = MSR_DRAM_POWER_LIMIT,
+       .regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_STATUS].msr     = MSR_DRAM_ENERGY_STATUS,
+       .regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_PERF].msr       = MSR_DRAM_PERF_STATUS,
+       .regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_INFO].msr       = MSR_DRAM_POWER_INFO,
+       .regs[RAPL_DOMAIN_PLATFORM][RAPL_DOMAIN_REG_LIMIT].msr  = MSR_PLATFORM_POWER_LIMIT,
+       .regs[RAPL_DOMAIN_PLATFORM][RAPL_DOMAIN_REG_STATUS].msr = MSR_PLATFORM_ENERGY_STATUS,
        .limits[RAPL_DOMAIN_PACKAGE] = BIT(POWER_LIMIT2),
        .limits[RAPL_DOMAIN_PLATFORM] = BIT(POWER_LIMIT2),
 };
 
 static struct rapl_if_priv rapl_msr_priv_amd = {
        .type = RAPL_IF_MSR,
-       .reg_unit = MSR_AMD_RAPL_POWER_UNIT,
-       .regs[RAPL_DOMAIN_PACKAGE] = {
-               0, MSR_AMD_PKG_ENERGY_STATUS, 0, 0, 0 },
-       .regs[RAPL_DOMAIN_PP0] = {
-               0, MSR_AMD_CORE_ENERGY_STATUS, 0, 0, 0 },
+       .reg_unit.msr = MSR_AMD_RAPL_POWER_UNIT,
+       .regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_STATUS].msr  = MSR_AMD_PKG_ENERGY_STATUS,
+       .regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_STATUS].msr      = MSR_AMD_CORE_ENERGY_STATUS,
 };
 
 /* Handles CPU hotplug on multi-socket systems.
@@ -99,10 +103,8 @@ static int rapl_cpu_down_prep(unsigned int cpu)
 
 static int rapl_msr_read_raw(int cpu, struct reg_action *ra)
 {
-       u32 msr = (u32)ra->reg;
-
-       if (rdmsrl_safe_on_cpu(cpu, msr, &ra->value)) {
-               pr_debug("failed to read msr 0x%x on cpu %d\n", msr, cpu);
+       if (rdmsrl_safe_on_cpu(cpu, ra->reg.msr, &ra->value)) {
+               pr_debug("failed to read msr 0x%x on cpu %d\n", ra->reg.msr, cpu);
                return -EIO;
        }
        ra->value &= ra->mask;
@@ -112,17 +114,16 @@ static int rapl_msr_read_raw(int cpu, struct reg_action *ra)
 static void rapl_msr_update_func(void *info)
 {
        struct reg_action *ra = info;
-       u32 msr = (u32)ra->reg;
        u64 val;
 
-       ra->err = rdmsrl_safe(msr, &val);
+       ra->err = rdmsrl_safe(ra->reg.msr, &val);
        if (ra->err)
                return;
 
        val &= ~ra->mask;
        val |= ra->value;
 
-       ra->err = wrmsrl_safe(msr, val);
+       ra->err = wrmsrl_safe(ra->reg.msr, val);
 }
 
 static int rapl_msr_write_raw(int cpu, struct reg_action *ra)
@@ -171,7 +172,7 @@ static int rapl_msr_probe(struct platform_device *pdev)
 
        if (id) {
                rapl_msr_priv->limits[RAPL_DOMAIN_PACKAGE] |= BIT(POWER_LIMIT4);
-               rapl_msr_priv->regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PL4] =
+               rapl_msr_priv->regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PL4].msr =
                        MSR_VR_CURRENT_CONFIG;
                pr_info("PL4 support detected.\n");
        }
index 4f4f13d..891c90f 100644 (file)
@@ -59,10 +59,10 @@ static struct powercap_control_type *tpmi_control_type;
 
 static int tpmi_rapl_read_raw(int id, struct reg_action *ra)
 {
-       if (!ra->reg)
+       if (!ra->reg.mmio)
                return -EINVAL;
 
-       ra->value = readq((void __iomem *)ra->reg);
+       ra->value = readq(ra->reg.mmio);
 
        ra->value &= ra->mask;
        return 0;
@@ -72,15 +72,15 @@ static int tpmi_rapl_write_raw(int id, struct reg_action *ra)
 {
        u64 val;
 
-       if (!ra->reg)
+       if (!ra->reg.mmio)
                return -EINVAL;
 
-       val = readq((void __iomem *)ra->reg);
+       val = readq(ra->reg.mmio);
 
        val &= ~ra->mask;
        val |= ra->value;
 
-       writeq(val, (void __iomem *)ra->reg);
+       writeq(val, ra->reg.mmio);
        return 0;
 }
 
@@ -138,8 +138,7 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset)
        enum tpmi_rapl_register reg_index;
        enum rapl_domain_reg_id reg_id;
        int tpmi_domain_size, tpmi_domain_flags;
-       u64 *tpmi_rapl_regs = trp->base + offset;
-       u64 tpmi_domain_header = readq((void __iomem *)tpmi_rapl_regs);
+       u64 tpmi_domain_header = readq(trp->base + offset);
 
        /* Domain Parent bits are ignored for now */
        tpmi_domain_version = tpmi_domain_header & 0xff;
@@ -180,7 +179,7 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset)
                return -EINVAL;
        }
 
-       if (trp->priv.regs[domain_type][RAPL_DOMAIN_REG_UNIT]) {
+       if (trp->priv.regs[domain_type][RAPL_DOMAIN_REG_UNIT].mmio) {
                pr_warn(FW_BUG "Duplicate Domain type %d\n", tpmi_domain_type);
                return -EINVAL;
        }
@@ -218,7 +217,7 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset)
                default:
                        continue;
                }
-               trp->priv.regs[domain_type][reg_id] = (u64)&tpmi_rapl_regs[reg_index];
+               trp->priv.regs[domain_type][reg_id].mmio = trp->base + offset + reg_index * 8;
        }
 
        return 0;
index 1d19542..613eab7 100644 (file)
@@ -716,7 +716,6 @@ struct qeth_card_info {
        u16 chid;
        u8 ids_valid:1; /* cssid,iid,chid */
        u8 dev_addr_is_registered:1;
-       u8 open_when_online:1;
        u8 promisc_mode:1;
        u8 use_v1_blkt:1;
        u8 is_vm_nic:1;
index 1d5b207..cd78329 100644 (file)
@@ -5373,8 +5373,6 @@ int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
        qeth_clear_ipacmd_list(card);
 
        rtnl_lock();
-       card->info.open_when_online = card->dev->flags & IFF_UP;
-       dev_close(card->dev);
        netif_device_detach(card->dev);
        netif_carrier_off(card->dev);
        rtnl_unlock();
index 9f13ed1..75910c0 100644 (file)
@@ -2388,9 +2388,12 @@ static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok)
                qeth_enable_hw_features(dev);
                qeth_l2_enable_brport_features(card);
 
-               if (card->info.open_when_online) {
-                       card->info.open_when_online = 0;
-                       dev_open(dev, NULL);
+               if (netif_running(dev)) {
+                       local_bh_disable();
+                       napi_schedule(&card->napi);
+                       /* kick-start the NAPI softirq: */
+                       local_bh_enable();
+                       qeth_l2_set_rx_mode(dev);
                }
                rtnl_unlock();
        }
index af4e60d..b92a32b 100644 (file)
@@ -2018,9 +2018,11 @@ static int qeth_l3_set_online(struct qeth_card *card, bool carrier_ok)
                netif_device_attach(dev);
                qeth_enable_hw_features(dev);
 
-               if (card->info.open_when_online) {
-                       card->info.open_when_online = 0;
-                       dev_open(dev, NULL);
+               if (netif_running(dev)) {
+                       local_bh_disable();
+                       napi_schedule(&card->napi);
+                       /* kick-start the NAPI softirq: */
+                       local_bh_enable();
                }
                rtnl_unlock();
        }
index f213075..4f0d0e5 100644 (file)
@@ -534,8 +534,7 @@ static void zfcp_fc_adisc_handler(void *data)
 
        /* re-init to undo drop from zfcp_fc_adisc() */
        port->d_id = ntoh24(adisc_resp->adisc_port_id);
-       /* port is good, unblock rport without going through erp */
-       zfcp_scsi_schedule_rport_register(port);
+       /* port is still good, nothing to do */
  out:
        atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
        put_device(&port->dev);
@@ -595,9 +594,6 @@ void zfcp_fc_link_test_work(struct work_struct *work)
        int retval;
 
        set_worker_desc("zadisc%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */
-       get_device(&port->dev);
-       port->rport_task = RPORT_DEL;
-       zfcp_scsi_rport_work(&port->rport_work);
 
        /* only issue one test command at one time per port */
        if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
index 2e886c1..4995e1e 100644 (file)
@@ -1181,7 +1181,8 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
                pm80xx_set_thermal_config(pm8001_ha);
        }
 
-       if (pm8001_init_sas_add(pm8001_ha))
+       rc = pm8001_init_sas_add(pm8001_ha);
+       if (rc)
                goto err_out_shost;
        /* phy setting support for motherboard controller */
        rc = pm8001_configure_phy_settings(pm8001_ha);
index 7f12d93..f282321 100644 (file)
@@ -366,6 +366,7 @@ static void storvsc_on_channel_callback(void *context);
 #define STORVSC_FC_MAX_LUNS_PER_TARGET                 255
 #define STORVSC_FC_MAX_TARGETS                         128
 #define STORVSC_FC_MAX_CHANNELS                                8
+#define STORVSC_FC_MAX_XFER_SIZE                       ((u32)(512 * 1024))
 
 #define STORVSC_IDE_MAX_LUNS_PER_TARGET                        64
 #define STORVSC_IDE_MAX_TARGETS                                1
@@ -2006,6 +2007,9 @@ static int storvsc_probe(struct hv_device *device,
         * protecting it from any weird value.
         */
        max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE);
+       if (is_fc)
+               max_xfer_bytes = min(max_xfer_bytes, STORVSC_FC_MAX_XFER_SIZE);
+
        /* max_hw_sectors_kb */
        host->max_sectors = max_xfer_bytes >> 9;
        /*
index 870aecc..1c1fcab 100644 (file)
@@ -164,7 +164,7 @@ static int imx8mp_hsio_blk_ctrl_probe(struct imx8mp_blk_ctrl *bc)
        clk_hsio_pll->hw.init = &init;
 
        hw = &clk_hsio_pll->hw;
-       ret = devm_clk_hw_register(bc->dev, hw);
+       ret = devm_clk_hw_register(bc->bus_power_dev, hw);
        if (ret)
                return ret;
 
index 013f163..2f00fc3 100644 (file)
@@ -57,10 +57,10 @@ static int rapl_mmio_cpu_down_prep(unsigned int cpu)
 
 static int rapl_mmio_read_raw(int cpu, struct reg_action *ra)
 {
-       if (!ra->reg)
+       if (!ra->reg.mmio)
                return -EINVAL;
 
-       ra->value = readq((void __iomem *)ra->reg);
+       ra->value = readq(ra->reg.mmio);
        ra->value &= ra->mask;
        return 0;
 }
@@ -69,13 +69,13 @@ static int rapl_mmio_write_raw(int cpu, struct reg_action *ra)
 {
        u64 val;
 
-       if (!ra->reg)
+       if (!ra->reg.mmio)
                return -EINVAL;
 
-       val = readq((void __iomem *)ra->reg);
+       val = readq(ra->reg.mmio);
        val &= ~ra->mask;
        val |= ra->value;
-       writeq(val, (void __iomem *)ra->reg);
+       writeq(val, ra->reg.mmio);
        return 0;
 }
 
@@ -92,13 +92,13 @@ int proc_thermal_rapl_add(struct pci_dev *pdev, struct proc_thermal_device *proc
        for (domain = RAPL_DOMAIN_PACKAGE; domain < RAPL_DOMAIN_MAX; domain++) {
                for (reg = RAPL_DOMAIN_REG_LIMIT; reg < RAPL_DOMAIN_REG_MAX; reg++)
                        if (rapl_regs->regs[domain][reg])
-                               rapl_mmio_priv.regs[domain][reg] =
-                                               (u64)proc_priv->mmio_base +
+                               rapl_mmio_priv.regs[domain][reg].mmio =
+                                               proc_priv->mmio_base +
                                                rapl_regs->regs[domain][reg];
                rapl_mmio_priv.limits[domain] = rapl_regs->limits[domain];
        }
        rapl_mmio_priv.type = RAPL_IF_MMIO;
-       rapl_mmio_priv.reg_unit = (u64)proc_priv->mmio_base + rapl_regs->reg_unit;
+       rapl_mmio_priv.reg_unit.mmio = proc_priv->mmio_base + rapl_regs->reg_unit;
 
        rapl_mmio_priv.read_raw = rapl_mmio_read_raw;
        rapl_mmio_priv.write_raw = rapl_mmio_write_raw;
index 66048a8..5fb367b 100644 (file)
@@ -4764,7 +4764,7 @@ static void delayed_work(struct work_struct *work)
 
        dout("mdsc delayed_work\n");
 
-       if (mdsc->stopping)
+       if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHED)
                return;
 
        mutex_lock(&mdsc->mutex);
@@ -4943,7 +4943,7 @@ void send_flush_mdlog(struct ceph_mds_session *s)
 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
 {
        dout("pre_umount\n");
-       mdsc->stopping = 1;
+       mdsc->stopping = CEPH_MDSC_STOPPING_BEGIN;
 
        ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true);
        ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false);
index 724307f..86d2965 100644 (file)
@@ -380,6 +380,11 @@ struct cap_wait {
        int                     want;
 };
 
+enum {
+       CEPH_MDSC_STOPPING_BEGIN = 1,
+       CEPH_MDSC_STOPPING_FLUSHED = 2,
+};
+
 /*
  * mds client state
  */
index 3fc48b4..a5f5201 100644 (file)
@@ -1374,6 +1374,16 @@ static void ceph_kill_sb(struct super_block *s)
        ceph_mdsc_pre_umount(fsc->mdsc);
        flush_fs_workqueues(fsc);
 
+       /*
+        * Though the kill_anon_super() will finally trigger the
+        * sync_filesystem() anyway, we still need to do it here
+        * and then bump the stage of shutdown to stop the work
+        * queue as earlier as possible.
+        */
+       sync_filesystem(s);
+
+       fsc->mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED;
+
        kill_anon_super(s);
 
        fsc->client->extra_mon_dispatch = NULL;
index 9d6a3c6..566f68d 100644 (file)
@@ -889,8 +889,6 @@ static void erofs_kill_sb(struct super_block *sb)
 {
        struct erofs_sb_info *sbi;
 
-       WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);
-
        /* pseudo mount for anon inodes */
        if (sb->s_flags & SB_KERNMOUNT) {
                kill_anon_super(sb);
index b69d89a..de4f121 100644 (file)
@@ -1144,10 +1144,11 @@ static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
                                         struct z_erofs_bvec *bvec)
 {
        struct z_erofs_bvec_item *item;
+       unsigned int pgnr;
 
-       if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK)) {
-               unsigned int pgnr;
-
+       if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK) &&
+           (bvec->end == PAGE_SIZE ||
+            bvec->offset + bvec->end == be->pcl->length)) {
                pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
                DBG_BUGON(pgnr >= be->nr_pages);
                if (!be->decompressed_pages[pgnr]) {
index 9f42f25..e918dec 100644 (file)
@@ -69,7 +69,7 @@ static int exfat_allocate_bitmap(struct super_block *sb,
        }
        sbi->map_sectors = ((need_map_size - 1) >>
                        (sb->s_blocksize_bits)) + 1;
-       sbi->vol_amap = kmalloc_array(sbi->map_sectors,
+       sbi->vol_amap = kvmalloc_array(sbi->map_sectors,
                                sizeof(struct buffer_head *), GFP_KERNEL);
        if (!sbi->vol_amap)
                return -ENOMEM;
@@ -84,7 +84,7 @@ static int exfat_allocate_bitmap(struct super_block *sb,
                        while (j < i)
                                brelse(sbi->vol_amap[j++]);
 
-                       kfree(sbi->vol_amap);
+                       kvfree(sbi->vol_amap);
                        sbi->vol_amap = NULL;
                        return -EIO;
                }
@@ -138,7 +138,7 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi)
        for (i = 0; i < sbi->map_sectors; i++)
                __brelse(sbi->vol_amap[i]);
 
-       kfree(sbi->vol_amap);
+       kvfree(sbi->vol_amap);
 }
 
 int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync)
index 9575741..598081d 100644 (file)
@@ -34,6 +34,7 @@ static int exfat_get_uniname_from_ext_entry(struct super_block *sb,
 {
        int i, err;
        struct exfat_entry_set_cache es;
+       unsigned int uni_len = 0, len;
 
        err = exfat_get_dentry_set(&es, sb, p_dir, entry, ES_ALL_ENTRIES);
        if (err)
@@ -52,7 +53,10 @@ static int exfat_get_uniname_from_ext_entry(struct super_block *sb,
                if (exfat_get_entry_type(ep) != TYPE_EXTEND)
                        break;
 
-               exfat_extract_uni_name(ep, uniname);
+               len = exfat_extract_uni_name(ep, uniname);
+               uni_len += len;
+               if (len != EXFAT_FILE_NAME_LEN || uni_len >= MAX_NAME_LENGTH)
+                       break;
                uniname += EXFAT_FILE_NAME_LEN;
        }
 
@@ -214,7 +218,10 @@ static void exfat_free_namebuf(struct exfat_dentry_namebuf *nb)
        exfat_init_namebuf(nb);
 }
 
-/* skip iterating emit_dots when dir is empty */
+/*
+ * Before calling dir_emit*(), sbi->s_lock should be released
+ * because page fault can occur in dir_emit*().
+ */
 #define ITER_POS_FILLED_DOTS    (2)
 static int exfat_iterate(struct file *file, struct dir_context *ctx)
 {
@@ -229,11 +236,10 @@ static int exfat_iterate(struct file *file, struct dir_context *ctx)
        int err = 0, fake_offset = 0;
 
        exfat_init_namebuf(nb);
-       mutex_lock(&EXFAT_SB(sb)->s_lock);
 
        cpos = ctx->pos;
        if (!dir_emit_dots(file, ctx))
-               goto unlock;
+               goto out;
 
        if (ctx->pos == ITER_POS_FILLED_DOTS) {
                cpos = 0;
@@ -245,16 +251,18 @@ static int exfat_iterate(struct file *file, struct dir_context *ctx)
        /* name buffer should be allocated before use */
        err = exfat_alloc_namebuf(nb);
        if (err)
-               goto unlock;
+               goto out;
 get_new:
+       mutex_lock(&EXFAT_SB(sb)->s_lock);
+
        if (ei->flags == ALLOC_NO_FAT_CHAIN && cpos >= i_size_read(inode))
                goto end_of_dir;
 
        err = exfat_readdir(inode, &cpos, &de);
        if (err) {
                /*
-                * At least we tried to read a sector.  Move cpos to next sector
-                * position (should be aligned).
+                * At least we tried to read a sector.
+                * Move cpos to next sector position (should be aligned).
                 */
                if (err == -EIO) {
                        cpos += 1 << (sb->s_blocksize_bits);
@@ -277,16 +285,10 @@ get_new:
                inum = iunique(sb, EXFAT_ROOT_INO);
        }
 
-       /*
-        * Before calling dir_emit(), sb_lock should be released.
-        * Because page fault can occur in dir_emit() when the size
-        * of buffer given from user is larger than one page size.
-        */
        mutex_unlock(&EXFAT_SB(sb)->s_lock);
        if (!dir_emit(ctx, nb->lfn, strlen(nb->lfn), inum,
                        (de.attr & ATTR_SUBDIR) ? DT_DIR : DT_REG))
-               goto out_unlocked;
-       mutex_lock(&EXFAT_SB(sb)->s_lock);
+               goto out;
        ctx->pos = cpos;
        goto get_new;
 
@@ -294,9 +296,8 @@ end_of_dir:
        if (!cpos && fake_offset)
                cpos = ITER_POS_FILLED_DOTS;
        ctx->pos = cpos;
-unlock:
        mutex_unlock(&EXFAT_SB(sb)->s_lock);
-out_unlocked:
+out:
        /*
         * To improve performance, free namebuf after unlock sb_lock.
         * If namebuf is not allocated, this function do nothing
@@ -1079,7 +1080,8 @@ rewind:
                        if (entry_type == TYPE_EXTEND) {
                                unsigned short entry_uniname[16], unichar;
 
-                               if (step != DIRENT_STEP_NAME) {
+                               if (step != DIRENT_STEP_NAME ||
+                                   name_len >= MAX_NAME_LENGTH) {
                                        step = DIRENT_STEP_FILE;
                                        continue;
                                }
index 35c62b5..dbca26e 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -1036,12 +1036,28 @@ unsigned long __fdget_raw(unsigned int fd)
        return __fget_light(fd, 0);
 }
 
+/*
+ * Try to avoid f_pos locking. We only need it if the
+ * file is marked for FMODE_ATOMIC_POS, and it can be
+ * accessed multiple ways.
+ *
+ * Always do it for directories, because pidfd_getfd()
+ * can make a file accessible even if it otherwise would
+ * not be, and for directories this is a correctness
+ * issue, not a "POSIX requirement".
+ */
+static inline bool file_needs_f_pos_lock(struct file *file)
+{
+       return (file->f_mode & FMODE_ATOMIC_POS) &&
+               (file_count(file) > 1 || S_ISDIR(file_inode(file)->i_mode));
+}
+
 unsigned long __fdget_pos(unsigned int fd)
 {
        unsigned long v = __fdget(fd);
        struct file *file = (struct file *)(v & ~3);
 
-       if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
+       if (file && file_needs_f_pos_lock(file)) {
                v |= FDPUT_POS_UNLOCK;
                mutex_lock(&file->f_pos_lock);
        }
index 8a2321d..2c9074a 100644 (file)
@@ -956,10 +956,13 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
        last_page = page + (offset + sd->len - 1) / PAGE_SIZE;
        for (page += offset / PAGE_SIZE; page <= last_page; page++) {
                /*
-                * Skip page replacement when extending the contents
-                * of the current page.
+                * Skip page replacement when extending the contents of the
+                * current page.  But note that we may get two zero_pages in a
+                * row from shmem.
                 */
-               if (page == *(rqstp->rq_next_page - 1))
+               if (page == *(rqstp->rq_next_page - 1) &&
+                   offset_in_page(rqstp->rq_res.page_base +
+                                  rqstp->rq_res.page_len))
                        continue;
                if (unlikely(!svc_rqst_replace_page(rqstp, page)))
                        return -EIO;
index 402a8c1..a8f4b65 100644 (file)
@@ -190,7 +190,7 @@ int hv_common_cpu_die(unsigned int cpu);
 
 void *hv_alloc_hyperv_page(void);
 void *hv_alloc_hyperv_zeroed_page(void);
-void hv_free_hyperv_page(unsigned long addr);
+void hv_free_hyperv_page(void *addr);
 
 /**
  * hv_cpu_number_to_vp_number() - Map CPU to VP.
index 20c93f0..95a1d21 100644 (file)
@@ -38,7 +38,7 @@ static inline long find_zero(unsigned long mask)
        return (mask >> 8) ? byte : byte + 1;
 }
 
-static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
 {
        unsigned long rhs = val | c->low_bits;
        *data = rhs;
index 0d2e2a3..f10fb87 100644 (file)
@@ -175,8 +175,8 @@ static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
 
 /**
  * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
- * @src1p: the first input
- * @src2p: the second input
+ * @srcp1: the first input
+ * @srcp2: the second input
  *
  * Returns >= nr_cpu_ids if no cpus set in both.  See also cpumask_next_and().
  */
@@ -1197,6 +1197,10 @@ cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
 /**
  * cpumap_print_list_to_buf  - copies the cpumask into the buffer as
  *     comma-separated list of cpus
+ * @buf: the buffer to copy into
+ * @mask: the cpumask to copy
+ * @off: in the string from which we are copying, we copy to @buf
+ * @count: the maximum number of bytes to print
  *
  * Everything is same with the above cpumap_print_bitmask_to_buf()
  * except the print format.
index bfbc37c..3ac3974 100644 (file)
@@ -1239,9 +1239,6 @@ extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
                                     u32 *buffer_actual_len,
                                     u64 *requestid);
 
-
-extern void vmbus_ontimer(unsigned long data);
-
 /* Base driver object */
 struct hv_driver {
        const char *name;
index e6936cb..33f21bd 100644 (file)
@@ -100,10 +100,16 @@ struct rapl_package;
 
 #define RAPL_DOMAIN_NAME_LENGTH 16
 
+union rapl_reg {
+       void __iomem *mmio;
+       u32 msr;
+       u64 val;
+};
+
 struct rapl_domain {
        char name[RAPL_DOMAIN_NAME_LENGTH];
        enum rapl_domain_type id;
-       u64 regs[RAPL_DOMAIN_REG_MAX];
+       union rapl_reg regs[RAPL_DOMAIN_REG_MAX];
        struct powercap_zone power_zone;
        struct rapl_domain_data rdd;
        struct rapl_power_limit rpl[NR_POWER_LIMITS];
@@ -116,7 +122,7 @@ struct rapl_domain {
 };
 
 struct reg_action {
-       u64 reg;
+       union rapl_reg reg;
        u64 mask;
        u64 value;
        int err;
@@ -143,8 +149,8 @@ struct rapl_if_priv {
        enum rapl_if_type type;
        struct powercap_control_type *control_type;
        enum cpuhp_state pcap_rapl_online;
-       u64 reg_unit;
-       u64 regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX];
+       union rapl_reg reg_unit;
+       union rapl_reg regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX];
        int limits[RAPL_DOMAIN_MAX];
        int (*read_raw)(int id, struct reg_action *ra);
        int (*write_raw)(int id, struct reg_action *ra);
index 0b85761..fc6c151 100644 (file)
@@ -15,4 +15,6 @@ struct corgi_lcd_platform_data {
        void (*kick_battery)(void);
 };
 
+void corgi_lcd_limit_intensity(int limit);
+
 #endif /* __LINUX_SPI_CORGI_LCD_H */
index 8e984d7..6b0a7dc 100644 (file)
@@ -101,6 +101,7 @@ struct spi_mem_op {
                u8 nbytes;
                u8 buswidth;
                u8 dtr : 1;
+               u8 __pad : 7;
                u16 opcode;
        } cmd;
 
@@ -108,6 +109,7 @@ struct spi_mem_op {
                u8 nbytes;
                u8 buswidth;
                u8 dtr : 1;
+               u8 __pad : 7;
                u64 val;
        } addr;
 
@@ -115,12 +117,14 @@ struct spi_mem_op {
                u8 nbytes;
                u8 buswidth;
                u8 dtr : 1;
+               u8 __pad : 7;
        } dummy;
 
        struct {
                u8 buswidth;
                u8 dtr : 1;
                u8 ecc : 1;
+               u8 __pad : 6;
                enum spi_mem_data_dir dir;
                unsigned int nbytes;
                union {
index 75efa6f..88644b3 100644 (file)
@@ -452,6 +452,49 @@ static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb,
                gro_normal_list(napi);
 }
 
+/* This function is the alternative of 'inet_iif' and 'inet_sdif'
+ * functions in case we can not rely on fields of IPCB.
+ *
+ * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized.
+ * The caller must hold the RCU read lock.
+ */
+static inline void inet_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif)
+{
+       *iif = inet_iif(skb) ?: skb->dev->ifindex;
+       *sdif = 0;
+
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+       if (netif_is_l3_slave(skb->dev)) {
+               struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev);
+
+               *sdif = *iif;
+               *iif = master ? master->ifindex : 0;
+       }
+#endif
+}
+
+/* This function is the alternative of 'inet6_iif' and 'inet6_sdif'
+ * functions in case we can not rely on fields of IP6CB.
+ *
+ * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized.
+ * The caller must hold the RCU read lock.
+ */
+static inline void inet6_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif)
+{
+       /* using skb->dev->ifindex because skb_dst(skb) is not initialized */
+       *iif = skb->dev->ifindex;
+       *sdif = 0;
+
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+       if (netif_is_l3_slave(skb->dev)) {
+               struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev);
+
+               *sdif = *iif;
+               *iif = master ? master->ifindex : 0;
+       }
+#endif
+}
+
 extern struct list_head offload_base;
 
 #endif /* _NET_IPV6_GRO_H */
index caa20a9..0bb32bf 100644 (file)
@@ -107,11 +107,12 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
 
 static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
 {
-       if (!sk->sk_mark &&
-           READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept))
+       u32 mark = READ_ONCE(sk->sk_mark);
+
+       if (!mark && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept))
                return skb->mark;
 
-       return sk->sk_mark;
+       return mark;
 }
 
 static inline int inet_request_bound_dev_if(const struct sock *sk,
index 50d4358..3325211 100644 (file)
@@ -93,7 +93,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
 {
        ipcm_init(ipcm);
 
-       ipcm->sockc.mark = inet->sk.sk_mark;
+       ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark);
        ipcm->sockc.tsflags = inet->sk.sk_tsflags;
        ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
        ipcm->addr = inet->inet_saddr;
index 5a5c726..8c2a8e7 100644 (file)
@@ -168,7 +168,7 @@ static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi
                                                   __be16 dport, __be16 sport,
                                                   __u8 proto, __u8 tos, int oif)
 {
-       flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos,
+       flowi4_init_output(fl4, oif, sk ? READ_ONCE(sk->sk_mark) : 0, tos,
                           RT_SCOPE_UNIVERSE, proto,
                           sk ? inet_sk_flowi_flags(sk) : 0,
                           daddr, saddr, dport, sport, sock_net_uid(net, sk));
@@ -301,7 +301,7 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst,
        if (inet_sk(sk)->transparent)
                flow_flags |= FLOWI_FLAG_ANYSRC;
 
-       flowi4_init_output(fl4, oif, sk->sk_mark, ip_sock_rt_tos(sk),
+       flowi4_init_output(fl4, oif, READ_ONCE(sk->sk_mark), ip_sock_rt_tos(sk),
                           ip_sock_rt_scope(sk), protocol, flow_flags, dst,
                           src, dport, sport, sk->sk_uid);
 }
index 1648240..6a9f8a5 100644 (file)
@@ -556,12 +556,12 @@ static inline void vxlan_flag_attr_error(int attrtype,
 }
 
 static inline bool vxlan_fdb_nh_path_select(struct nexthop *nh,
-                                           int hash,
+                                           u32 hash,
                                            struct vxlan_rdst *rdst)
 {
        struct fib_nh_common *nhc;
 
-       nhc = nexthop_path_fdb_result(nh, hash);
+       nhc = nexthop_path_fdb_result(nh, hash >> 1);
        if (unlikely(!nhc))
                return false;
 
index fc30014..a5ef849 100644 (file)
@@ -175,6 +175,9 @@ struct tegra_mc_icc_ops {
        int (*get_bw)(struct icc_node *node, u32 *avg, u32 *peak);
 };
 
+struct icc_node *tegra_mc_icc_xlate(struct of_phandle_args *spec, void *data);
+extern const struct tegra_mc_icc_ops tegra_mc_icc_ops;
+
 struct tegra_mc_ops {
        /*
         * @probe: Callback to set up SoC-specific bits of the memory controller. This is called
index 7865f5a..4f3932b 100644 (file)
@@ -710,9 +710,11 @@ enum {
        TCA_FLOWER_KEY_CFM_OPT_UNSPEC,
        TCA_FLOWER_KEY_CFM_MD_LEVEL,
        TCA_FLOWER_KEY_CFM_OPCODE,
-       TCA_FLOWER_KEY_CFM_OPT_MAX,
+       __TCA_FLOWER_KEY_CFM_OPT_MAX,
 };
 
+#define TCA_FLOWER_KEY_CFM_OPT_MAX (__TCA_FLOWER_KEY_CFM_OPT_MAX - 1)
+
 #define TCA_FLOWER_MASK_FLAGS_RANGE    (1 << 0) /* Range-based match */
 
 /* Match-all classifier */
index 6ae02be..286ab3d 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/sched.h>
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
+#include <linux/completion.h>
 #include <trace/events/xdp.h>
 #include <linux/btf_ids.h>
 
@@ -73,6 +74,7 @@ struct bpf_cpu_map_entry {
        struct rcu_head rcu;
 
        struct work_struct kthread_stop_wq;
+       struct completion kthread_running;
 };
 
 struct bpf_cpu_map {
@@ -129,11 +131,17 @@ static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
         * invoked cpu_map_kthread_stop(). Catch any broken behaviour
         * gracefully and warn once.
         */
-       struct xdp_frame *xdpf;
+       void *ptr;
 
-       while ((xdpf = ptr_ring_consume(ring)))
-               if (WARN_ON_ONCE(xdpf))
-                       xdp_return_frame(xdpf);
+       while ((ptr = ptr_ring_consume(ring))) {
+               WARN_ON_ONCE(1);
+               if (unlikely(__ptr_test_bit(0, &ptr))) {
+                       __ptr_clear_bit(0, &ptr);
+                       kfree_skb(ptr);
+                       continue;
+               }
+               xdp_return_frame(ptr);
+       }
 }
 
 static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
@@ -153,7 +161,6 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
 static void cpu_map_kthread_stop(struct work_struct *work)
 {
        struct bpf_cpu_map_entry *rcpu;
-       int err;
 
        rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
 
@@ -163,14 +170,7 @@ static void cpu_map_kthread_stop(struct work_struct *work)
        rcu_barrier();
 
        /* kthread_stop will wake_up_process and wait for it to complete */
-       err = kthread_stop(rcpu->kthread);
-       if (err) {
-               /* kthread_stop may be called before cpu_map_kthread_run
-                * is executed, so we need to release the memory related
-                * to rcpu.
-                */
-               put_cpu_map_entry(rcpu);
-       }
+       kthread_stop(rcpu->kthread);
 }
 
 static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
@@ -298,11 +298,11 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
        return nframes;
 }
 
-
 static int cpu_map_kthread_run(void *data)
 {
        struct bpf_cpu_map_entry *rcpu = data;
 
+       complete(&rcpu->kthread_running);
        set_current_state(TASK_INTERRUPTIBLE);
 
        /* When kthread gives stop order, then rcpu have been disconnected
@@ -467,6 +467,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
                goto free_ptr_ring;
 
        /* Setup kthread */
+       init_completion(&rcpu->kthread_running);
        rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
                                               "cpumap/%d/map:%d", cpu,
                                               map->id);
@@ -480,6 +481,12 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
        kthread_bind(rcpu->kthread, cpu);
        wake_up_process(rcpu->kthread);
 
+       /* Make sure kthread has been running, so kthread_stop() will not
+        * stop the kthread prematurely and all pending frames or skbs
+        * will be handled by the kthread before kthread_stop() returns.
+        */
+       wait_for_completion(&rcpu->kthread_running);
+
        return rcpu;
 
 free_prog:
index 5f2dcab..bd1a42b 100644 (file)
@@ -661,8 +661,7 @@ static DEFINE_PER_CPU(int, bpf_trace_nest_level);
 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
           u64, flags, void *, data, u64, size)
 {
-       struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
-       int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
+       struct bpf_trace_sample_data *sds;
        struct perf_raw_record raw = {
                .frag = {
                        .size = size,
@@ -670,7 +669,11 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
                },
        };
        struct perf_sample_data *sd;
-       int err;
+       int nest_level, err;
+
+       preempt_disable();
+       sds = this_cpu_ptr(&bpf_trace_sds);
+       nest_level = this_cpu_inc_return(bpf_trace_nest_level);
 
        if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
                err = -EBUSY;
@@ -688,9 +691,9 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
        perf_sample_save_raw_data(sd, &raw);
 
        err = __bpf_perf_event_output(regs, map, flags, sd);
-
 out:
        this_cpu_dec(bpf_trace_nest_level);
+       preempt_enable();
        return err;
 }
 
@@ -715,7 +718,6 @@ static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
                     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
 {
-       int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
        struct perf_raw_frag frag = {
                .copy           = ctx_copy,
                .size           = ctx_size,
@@ -732,8 +734,12 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
        };
        struct perf_sample_data *sd;
        struct pt_regs *regs;
+       int nest_level;
        u64 ret;
 
+       preempt_disable();
+       nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
+
        if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
                ret = -EBUSY;
                goto out;
@@ -748,6 +754,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
        ret = __bpf_perf_event_output(regs, map, flags, sd);
 out:
        this_cpu_dec(bpf_event_output_nest_level);
+       preempt_enable();
        return ret;
 }
 
index 42d307a..1ffae65 100644 (file)
@@ -82,7 +82,13 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
 obj-$(CONFIG_TEST_DYNAMIC_DEBUG) += test_dynamic_debug.o
 obj-$(CONFIG_TEST_PRINTF) += test_printf.o
 obj-$(CONFIG_TEST_SCANF) += test_scanf.o
+
 obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
+ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_KASAN),yy)
+# FIXME: Clang breaks test_bitmap_const_eval when KASAN and GCOV are enabled
+GCOV_PROFILE_test_bitmap.o := n
+endif
+
 obj-$(CONFIG_TEST_UUID) += test_uuid.o
 obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
 obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o
index de356f1..a7fd02b 100644 (file)
@@ -45,6 +45,7 @@ EXPORT_SYMBOL(cpumask_next_wrap);
  * alloc_cpumask_var_node - allocate a struct cpumask on a given node
  * @mask: pointer to cpumask_var_t where the cpumask is returned
  * @flags: GFP_ flags
+ * @node: memory node from which to allocate or %NUMA_NO_NODE
  *
  * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
  * a nop returning a constant 1 (in <linux/cpumask.h>)
@@ -157,7 +158,9 @@ EXPORT_SYMBOL(cpumask_local_spread);
 static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);
 
 /**
- * cpumask_any_and_distribute - Return an arbitrary cpu within srcp1 & srcp2.
+ * cpumask_any_and_distribute - Return an arbitrary cpu within src1p & src2p.
+ * @src1p: first &cpumask for intersection
+ * @src2p: second &cpumask for intersection
  *
  * Iterated calls using the same srcp1 and srcp2 will be distributed within
  * their intersection.
index 187f5b2..f2ea9f3 100644 (file)
@@ -1161,6 +1161,10 @@ static void __init test_bitmap_print_buf(void)
        }
 }
 
+/*
+ * FIXME: Clang breaks compile-time evaluations when KASAN and GCOV are enabled.
+ * To workaround it, GCOV is force-disabled in Makefile for this configuration.
+ */
 static void __init test_bitmap_const_eval(void)
 {
        DECLARE_BITMAP(bitmap, BITS_PER_LONG);
@@ -1186,11 +1190,7 @@ static void __init test_bitmap_const_eval(void)
         * the compiler is fixed.
         */
        bitmap_clear(bitmap, 0, BITS_PER_LONG);
-#if defined(__s390__) && defined(__clang__)
-       if (!const_test_bit(7, bitmap))
-#else
        if (!test_bit(7, bitmap))
-#endif
                bitmap_set(bitmap, 5, 2);
 
        /* Equals to `unsigned long bitopvar = BIT(20)` */
index ba6b52b..e10f593 100644 (file)
@@ -865,7 +865,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 
        skb->dev = dev;
        skb->priority = sk->sk_priority;
-       skb->mark = sk->sk_mark;
+       skb->mark = READ_ONCE(sk->sk_mark);
        skb->tstamp = sockc.transmit_time;
 
        skb_setup_tx_timestamp(skb, sockc.tsflags);
index 11c04e7..658a6f2 100644 (file)
@@ -3334,17 +3334,24 @@ static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
        int ret;
 
        dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
-       ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
+       ret = wait_for_completion_killable(&lreq->reg_commit_wait);
        return ret ?: lreq->reg_commit_error;
 }
 
-static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
+static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq,
+                                    unsigned long timeout)
 {
-       int ret;
+       long left;
 
        dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
-       ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
-       return ret ?: lreq->notify_finish_error;
+       left = wait_for_completion_killable_timeout(&lreq->notify_finish_wait,
+                                               ceph_timeout_jiffies(timeout));
+       if (left <= 0)
+               left = left ?: -ETIMEDOUT;
+       else
+               left = lreq->notify_finish_error; /* completed */
+
+       return left;
 }
 
 /*
@@ -4896,7 +4903,8 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
        linger_submit(lreq);
        ret = linger_reg_commit_wait(lreq);
        if (!ret)
-               ret = linger_notify_finish_wait(lreq);
+               ret = linger_notify_finish_wait(lreq,
+                                msecs_to_jiffies(2 * timeout * MSEC_PER_SEC));
        else
                dout("lreq %p failed to initiate notify %d\n", lreq, ret);
 
index d417253..cca7594 100644 (file)
@@ -496,8 +496,11 @@ bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
                return ERR_PTR(-EPERM);
 
        nla_for_each_nested(nla, nla_stgs, rem) {
-               if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
+               if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD) {
+                       if (nla_len(nla) != sizeof(u32))
+                               return ERR_PTR(-EINVAL);
                        nr_maps++;
+               }
        }
 
        diag = kzalloc(struct_size(diag, maps, nr_maps), GFP_KERNEL);
index 3ad4e03..aef25aa 100644 (file)
@@ -5140,13 +5140,17 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
        br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
        if (br_spec) {
                nla_for_each_nested(attr, br_spec, rem) {
-                       if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
+                       if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !have_flags) {
                                if (nla_len(attr) < sizeof(flags))
                                        return -EINVAL;
 
                                have_flags = true;
                                flags = nla_get_u16(attr);
-                               break;
+                       }
+
+                       if (nla_type(attr) == IFLA_BRIDGE_MODE) {
+                               if (nla_len(attr) < sizeof(u16))
+                                       return -EINVAL;
                        }
                }
        }
index 9370fd5..6d4f28e 100644 (file)
@@ -429,6 +429,7 @@ static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
 {
        struct __kernel_sock_timeval tv;
        int err = sock_copy_user_timeval(&tv, optval, optlen, old_timeval);
+       long val;
 
        if (err)
                return err;
@@ -439,7 +440,7 @@ static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
        if (tv.tv_sec < 0) {
                static int warned __read_mostly;
 
-               *timeo_p = 0;
+               WRITE_ONCE(*timeo_p, 0);
                if (warned < 10 && net_ratelimit()) {
                        warned++;
                        pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
@@ -447,11 +448,12 @@ static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
                }
                return 0;
        }
-       *timeo_p = MAX_SCHEDULE_TIMEOUT;
-       if (tv.tv_sec == 0 && tv.tv_usec == 0)
-               return 0;
-       if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1))
-               *timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, USEC_PER_SEC / HZ);
+       val = MAX_SCHEDULE_TIMEOUT;
+       if ((tv.tv_sec || tv.tv_usec) &&
+           (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)))
+               val = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec,
+                                                   USEC_PER_SEC / HZ);
+       WRITE_ONCE(*timeo_p, val);
        return 0;
 }
 
@@ -804,7 +806,7 @@ EXPORT_SYMBOL(sock_no_linger);
 void sock_set_priority(struct sock *sk, u32 priority)
 {
        lock_sock(sk);
-       sk->sk_priority = priority;
+       WRITE_ONCE(sk->sk_priority, priority);
        release_sock(sk);
 }
 EXPORT_SYMBOL(sock_set_priority);
@@ -813,9 +815,9 @@ void sock_set_sndtimeo(struct sock *sk, s64 secs)
 {
        lock_sock(sk);
        if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1)
-               sk->sk_sndtimeo = secs * HZ;
+               WRITE_ONCE(sk->sk_sndtimeo, secs * HZ);
        else
-               sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+               WRITE_ONCE(sk->sk_sndtimeo, MAX_SCHEDULE_TIMEOUT);
        release_sock(sk);
 }
 EXPORT_SYMBOL(sock_set_sndtimeo);
@@ -988,7 +990,7 @@ EXPORT_SYMBOL(sock_set_rcvbuf);
 static void __sock_set_mark(struct sock *sk, u32 val)
 {
        if (val != sk->sk_mark) {
-               sk->sk_mark = val;
+               WRITE_ONCE(sk->sk_mark, val);
                sk_dst_reset(sk);
        }
 }
@@ -1007,7 +1009,7 @@ static void sock_release_reserved_memory(struct sock *sk, int bytes)
        bytes = round_down(bytes, PAGE_SIZE);
 
        WARN_ON(bytes > sk->sk_reserved_mem);
-       sk->sk_reserved_mem -= bytes;
+       WRITE_ONCE(sk->sk_reserved_mem, sk->sk_reserved_mem - bytes);
        sk_mem_reclaim(sk);
 }
 
@@ -1044,7 +1046,8 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
        }
        sk->sk_forward_alloc += pages << PAGE_SHIFT;
 
-       sk->sk_reserved_mem += pages << PAGE_SHIFT;
+       WRITE_ONCE(sk->sk_reserved_mem,
+                  sk->sk_reserved_mem + (pages << PAGE_SHIFT));
 
        return 0;
 }
@@ -1213,7 +1216,7 @@ set_sndbuf:
                if ((val >= 0 && val <= 6) ||
                    sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) ||
                    sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
-                       sk->sk_priority = val;
+                       WRITE_ONCE(sk->sk_priority, val);
                else
                        ret = -EPERM;
                break;
@@ -1438,7 +1441,8 @@ set_sndbuf:
                        cmpxchg(&sk->sk_pacing_status,
                                SK_PACING_NONE,
                                SK_PACING_NEEDED);
-               sk->sk_max_pacing_rate = ulval;
+               /* Pairs with READ_ONCE() from sk_getsockopt() */
+               WRITE_ONCE(sk->sk_max_pacing_rate, ulval);
                sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
                break;
                }
@@ -1533,7 +1537,9 @@ set_sndbuf:
                }
                if ((u8)val == SOCK_TXREHASH_DEFAULT)
                        val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
-               /* Paired with READ_ONCE() in tcp_rtx_synack() */
+               /* Paired with READ_ONCE() in tcp_rtx_synack()
+                * and sk_getsockopt().
+                */
                WRITE_ONCE(sk->sk_txrehash, (u8)val);
                break;
 
@@ -1633,11 +1639,11 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
                break;
 
        case SO_SNDBUF:
-               v.val = sk->sk_sndbuf;
+               v.val = READ_ONCE(sk->sk_sndbuf);
                break;
 
        case SO_RCVBUF:
-               v.val = sk->sk_rcvbuf;
+               v.val = READ_ONCE(sk->sk_rcvbuf);
                break;
 
        case SO_REUSEADDR:
@@ -1679,7 +1685,7 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
                break;
 
        case SO_PRIORITY:
-               v.val = sk->sk_priority;
+               v.val = READ_ONCE(sk->sk_priority);
                break;
 
        case SO_LINGER:
@@ -1717,16 +1723,18 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
 
        case SO_RCVTIMEO_OLD:
        case SO_RCVTIMEO_NEW:
-               lv = sock_get_timeout(sk->sk_rcvtimeo, &v, SO_RCVTIMEO_OLD == optname);
+               lv = sock_get_timeout(READ_ONCE(sk->sk_rcvtimeo), &v,
+                                     SO_RCVTIMEO_OLD == optname);
                break;
 
        case SO_SNDTIMEO_OLD:
        case SO_SNDTIMEO_NEW:
-               lv = sock_get_timeout(sk->sk_sndtimeo, &v, SO_SNDTIMEO_OLD == optname);
+               lv = sock_get_timeout(READ_ONCE(sk->sk_sndtimeo), &v,
+                                     SO_SNDTIMEO_OLD == optname);
                break;
 
        case SO_RCVLOWAT:
-               v.val = sk->sk_rcvlowat;
+               v.val = READ_ONCE(sk->sk_rcvlowat);
                break;
 
        case SO_SNDLOWAT:
@@ -1843,7 +1851,7 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
                                                         optval, optlen, len);
 
        case SO_MARK:
-               v.val = sk->sk_mark;
+               v.val = READ_ONCE(sk->sk_mark);
                break;
 
        case SO_RCVMARK:
@@ -1862,7 +1870,7 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
                if (!sock->ops->set_peek_off)
                        return -EOPNOTSUPP;
 
-               v.val = sk->sk_peek_off;
+               v.val = READ_ONCE(sk->sk_peek_off);
                break;
        case SO_NOFCS:
                v.val = sock_flag(sk, SOCK_NOFCS);
@@ -1892,7 +1900,7 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
        case SO_BUSY_POLL:
-               v.val = sk->sk_ll_usec;
+               v.val = READ_ONCE(sk->sk_ll_usec);
                break;
        case SO_PREFER_BUSY_POLL:
                v.val = READ_ONCE(sk->sk_prefer_busy_poll);
@@ -1900,12 +1908,14 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
 #endif
 
        case SO_MAX_PACING_RATE:
+               /* The READ_ONCE() pair with the WRITE_ONCE() in sk_setsockopt() */
                if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) {
                        lv = sizeof(v.ulval);
-                       v.ulval = sk->sk_max_pacing_rate;
+                       v.ulval = READ_ONCE(sk->sk_max_pacing_rate);
                } else {
                        /* 32bit version */
-                       v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
+                       v.val = min_t(unsigned long, ~0U,
+                                     READ_ONCE(sk->sk_max_pacing_rate));
                }
                break;
 
@@ -1973,11 +1983,12 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
                break;
 
        case SO_RESERVE_MEM:
-               v.val = sk->sk_reserved_mem;
+               v.val = READ_ONCE(sk->sk_reserved_mem);
                break;
 
        case SO_TXREHASH:
-               v.val = sk->sk_txrehash;
+               /* Paired with WRITE_ONCE() in sk_setsockopt() */
+               v.val = READ_ONCE(sk->sk_txrehash);
                break;
 
        default:
@@ -3168,7 +3179,7 @@ EXPORT_SYMBOL(__sk_mem_reclaim);
 
 int sk_set_peek_off(struct sock *sk, int val)
 {
-       sk->sk_peek_off = val;
+       WRITE_ONCE(sk->sk_peek_off, val);
        return 0;
 }
 EXPORT_SYMBOL_GPL(sk_set_peek_off);
index 19538d6..08ab108 100644 (file)
@@ -115,7 +115,6 @@ static void sock_map_sk_acquire(struct sock *sk)
        __acquires(&sk->sk_lock.slock)
 {
        lock_sock(sk);
-       preempt_disable();
        rcu_read_lock();
 }
 
@@ -123,7 +122,6 @@ static void sock_map_sk_release(struct sock *sk)
        __releases(&sk->sk_lock.slock)
 {
        rcu_read_unlock();
-       preempt_enable();
        release_sock(sk);
 }
 
index c0c4381..2e6b8c8 100644 (file)
@@ -980,7 +980,7 @@ static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
                return -EOPNOTSUPP;
 
        ret = nla_parse_nested_deprecated(data, DCB_BCN_ATTR_MAX,
-                                         tb[DCB_ATTR_BCN], dcbnl_pfc_up_nest,
+                                         tb[DCB_ATTR_BCN], dcbnl_bcn_nest,
                                          NULL);
        if (ret)
                return ret;
index 7249ef2..d29d116 100644 (file)
@@ -238,8 +238,8 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
                opt = ireq->ipv6_opt;
                if (!opt)
                        opt = rcu_dereference(np->opt);
-               err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass,
-                              sk->sk_priority);
+               err = ip6_xmit(sk, skb, &fl6, READ_ONCE(sk->sk_mark), opt,
+                              np->tclass, sk->sk_priority);
                rcu_read_unlock();
                err = net_xmit_eval(err);
        }
index 0ce8fd3..2f6195d 100644 (file)
@@ -1727,8 +1727,15 @@ int dsa_port_phylink_create(struct dsa_port *dp)
            ds->ops->phylink_mac_an_restart)
                dp->pl_config.legacy_pre_march2020 = true;
 
-       if (ds->ops->phylink_get_caps)
+       if (ds->ops->phylink_get_caps) {
                ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);
+       } else {
+               /* For legacy drivers */
+               __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+                         dp->pl_config.supported_interfaces);
+               __set_bit(PHY_INTERFACE_MODE_GMII,
+                         dp->pl_config.supported_interfaces);
+       }
 
        pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
                            mode, &dsa_port_phylink_mac_ops);
index b812eb3..f742692 100644 (file)
@@ -150,7 +150,7 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
        }
 #endif
 
-       if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark))
+       if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, READ_ONCE(sk->sk_mark)))
                goto errout;
 
        if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
@@ -799,7 +799,7 @@ int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
        entry.ifindex = sk->sk_bound_dev_if;
        entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;
        if (sk_fullsock(sk))
-               entry.mark = sk->sk_mark;
+               entry.mark = READ_ONCE(sk->sk_mark);
        else if (sk->sk_state == TCP_NEW_SYN_RECV)
                entry.mark = inet_rsk(inet_reqsk(sk))->ir_mark;
        else if (sk->sk_state == TCP_TIME_WAIT)
index 6e70839..6ba1a0f 100644 (file)
@@ -184,9 +184,9 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
                ip_options_build(skb, &opt->opt, daddr, rt);
        }
 
-       skb->priority = sk->sk_priority;
+       skb->priority = READ_ONCE(sk->sk_priority);
        if (!skb->mark)
-               skb->mark = sk->sk_mark;
+               skb->mark = READ_ONCE(sk->sk_mark);
 
        /* Send it out. */
        return ip_local_out(net, skb->sk, skb);
@@ -528,8 +528,8 @@ packet_routed:
                             skb_shinfo(skb)->gso_segs ?: 1);
 
        /* TODO : should we use skb->sk here instead of sk ? */
-       skb->priority = sk->sk_priority;
-       skb->mark = sk->sk_mark;
+       skb->priority = READ_ONCE(sk->sk_priority);
+       skb->mark = READ_ONCE(sk->sk_mark);
 
        res = ip_local_out(net, sk, skb);
        rcu_read_unlock();
@@ -1158,10 +1158,15 @@ alloc_new_skb:
                        }
 
                        copy = datalen - transhdrlen - fraggap - pagedlen;
+                       /* [!] NOTE: copy will be negative if pagedlen>0
+                        * because then the equation reduces to -fraggap.
+                        */
                        if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
                                err = -EFAULT;
                                kfree_skb(skb);
                                goto error;
+                       } else if (flags & MSG_SPLICE_PAGES) {
+                               copy = 0;
                        }
 
                        offset += copy;
@@ -1209,6 +1214,10 @@ alloc_new_skb:
                } else if (flags & MSG_SPLICE_PAGES) {
                        struct msghdr *msg = from;
 
+                       err = -EIO;
+                       if (WARN_ON_ONCE(copy > msg->msg_iter.count))
+                               goto error;
+
                        err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
                                                   sk->sk_allocation);
                        if (err < 0)
index 8e97d8d..d41bce8 100644 (file)
@@ -592,7 +592,7 @@ void __ip_sock_set_tos(struct sock *sk, int val)
        }
        if (inet_sk(sk)->tos != val) {
                inet_sk(sk)->tos = val;
-               sk->sk_priority = rt_tos2priority(val);
+               WRITE_ONCE(sk->sk_priority, rt_tos2priority(val));
                sk_dst_reset(sk);
        }
 }
index 7782ff5..cb381f5 100644 (file)
@@ -348,7 +348,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
                goto error;
        skb_reserve(skb, hlen);
 
-       skb->priority = sk->sk_priority;
+       skb->priority = READ_ONCE(sk->sk_priority);
        skb->mark = sockc->mark;
        skb->tstamp = sockc->transmit_time;
        skb_dst_set(skb, &rt->dst);
index 98d7e6b..92fede3 100644 (file)
@@ -518,7 +518,7 @@ static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
                const struct inet_sock *inet = inet_sk(sk);
 
                oif = sk->sk_bound_dev_if;
-               mark = sk->sk_mark;
+               mark = READ_ONCE(sk->sk_mark);
                tos = ip_sock_rt_tos(sk);
                scope = ip_sock_rt_scope(sk);
                prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
@@ -552,7 +552,7 @@ static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
        inet_opt = rcu_dereference(inet->inet_opt);
        if (inet_opt && inet_opt->opt.srr)
                daddr = inet_opt->opt.faddr;
-       flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+       flowi4_init_output(fl4, sk->sk_bound_dev_if, READ_ONCE(sk->sk_mark),
                           ip_sock_rt_tos(sk) & IPTOS_RT_MASK,
                           ip_sock_rt_scope(sk),
                           inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
index 0696420..a59cc4b 100644 (file)
@@ -931,9 +931,9 @@ static void tcp_v4_send_ack(const struct sock *sk,
        ctl_sk = this_cpu_read(ipv4_tcp_sk);
        sock_net_set(ctl_sk, net);
        ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
-                          inet_twsk(sk)->tw_mark : sk->sk_mark;
+                          inet_twsk(sk)->tw_mark : READ_ONCE(sk->sk_mark);
        ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
-                          inet_twsk(sk)->tw_priority : sk->sk_priority;
+                          inet_twsk(sk)->tw_priority : READ_ONCE(sk->sk_priority);
        transmit_time = tcp_transmit_time(sk);
        ip_send_unicast_reply(ctl_sk,
                              skb, &TCP_SKB_CB(skb)->header.h4.opt,
index 82f4575..99ac5ef 100644 (file)
@@ -40,7 +40,7 @@ struct tcp_fastopen_metrics {
 
 struct tcp_metrics_block {
        struct tcp_metrics_block __rcu  *tcpm_next;
-       possible_net_t                  tcpm_net;
+       struct net                      *tcpm_net;
        struct inetpeer_addr            tcpm_saddr;
        struct inetpeer_addr            tcpm_daddr;
        unsigned long                   tcpm_stamp;
@@ -51,34 +51,38 @@ struct tcp_metrics_block {
        struct rcu_head                 rcu_head;
 };
 
-static inline struct net *tm_net(struct tcp_metrics_block *tm)
+static inline struct net *tm_net(const struct tcp_metrics_block *tm)
 {
-       return read_pnet(&tm->tcpm_net);
+       /* Paired with the WRITE_ONCE() in tcpm_new() */
+       return READ_ONCE(tm->tcpm_net);
 }
 
 static bool tcp_metric_locked(struct tcp_metrics_block *tm,
                              enum tcp_metric_index idx)
 {
-       return tm->tcpm_lock & (1 << idx);
+       /* Paired with WRITE_ONCE() in tcpm_suck_dst() */
+       return READ_ONCE(tm->tcpm_lock) & (1 << idx);
 }
 
-static u32 tcp_metric_get(struct tcp_metrics_block *tm,
+static u32 tcp_metric_get(const struct tcp_metrics_block *tm,
                          enum tcp_metric_index idx)
 {
-       return tm->tcpm_vals[idx];
+       /* Paired with WRITE_ONCE() in tcp_metric_set() */
+       return READ_ONCE(tm->tcpm_vals[idx]);
 }
 
 static void tcp_metric_set(struct tcp_metrics_block *tm,
                           enum tcp_metric_index idx,
                           u32 val)
 {
-       tm->tcpm_vals[idx] = val;
+       /* Paired with READ_ONCE() in tcp_metric_get() */
+       WRITE_ONCE(tm->tcpm_vals[idx], val);
 }
 
 static bool addr_same(const struct inetpeer_addr *a,
                      const struct inetpeer_addr *b)
 {
-       return inetpeer_addr_cmp(a, b) == 0;
+       return (a->family == b->family) && !inetpeer_addr_cmp(a, b);
 }
 
 struct tcpm_hash_bucket {
@@ -89,6 +93,7 @@ static struct tcpm_hash_bucket        *tcp_metrics_hash __read_mostly;
 static unsigned int            tcp_metrics_hash_log __read_mostly;
 
 static DEFINE_SPINLOCK(tcp_metrics_lock);
+static DEFINE_SEQLOCK(fastopen_seqlock);
 
 static void tcpm_suck_dst(struct tcp_metrics_block *tm,
                          const struct dst_entry *dst,
@@ -97,7 +102,7 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm,
        u32 msval;
        u32 val;
 
-       tm->tcpm_stamp = jiffies;
+       WRITE_ONCE(tm->tcpm_stamp, jiffies);
 
        val = 0;
        if (dst_metric_locked(dst, RTAX_RTT))
@@ -110,30 +115,42 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm,
                val |= 1 << TCP_METRIC_CWND;
        if (dst_metric_locked(dst, RTAX_REORDERING))
                val |= 1 << TCP_METRIC_REORDERING;
-       tm->tcpm_lock = val;
+       /* Paired with READ_ONCE() in tcp_metric_locked() */
+       WRITE_ONCE(tm->tcpm_lock, val);
 
        msval = dst_metric_raw(dst, RTAX_RTT);
-       tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;
+       tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC);
 
        msval = dst_metric_raw(dst, RTAX_RTTVAR);
-       tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
-       tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
-       tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
-       tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
+       tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC);
+       tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
+                      dst_metric_raw(dst, RTAX_SSTHRESH));
+       tcp_metric_set(tm, TCP_METRIC_CWND,
+                      dst_metric_raw(dst, RTAX_CWND));
+       tcp_metric_set(tm, TCP_METRIC_REORDERING,
+                      dst_metric_raw(dst, RTAX_REORDERING));
        if (fastopen_clear) {
+               write_seqlock(&fastopen_seqlock);
                tm->tcpm_fastopen.mss = 0;
                tm->tcpm_fastopen.syn_loss = 0;
                tm->tcpm_fastopen.try_exp = 0;
                tm->tcpm_fastopen.cookie.exp = false;
                tm->tcpm_fastopen.cookie.len = 0;
+               write_sequnlock(&fastopen_seqlock);
        }
 }
 
 #define TCP_METRICS_TIMEOUT            (60 * 60 * HZ)
 
-static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
+static void tcpm_check_stamp(struct tcp_metrics_block *tm,
+                            const struct dst_entry *dst)
 {
-       if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
+       unsigned long limit;
+
+       if (!tm)
+               return;
+       limit = READ_ONCE(tm->tcpm_stamp) + TCP_METRICS_TIMEOUT;
+       if (unlikely(time_after(jiffies, limit)))
                tcpm_suck_dst(tm, dst, false);
 }
 
@@ -174,20 +191,23 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
                oldest = deref_locked(tcp_metrics_hash[hash].chain);
                for (tm = deref_locked(oldest->tcpm_next); tm;
                     tm = deref_locked(tm->tcpm_next)) {
-                       if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
+                       if (time_before(READ_ONCE(tm->tcpm_stamp),
+                                       READ_ONCE(oldest->tcpm_stamp)))
                                oldest = tm;
                }
                tm = oldest;
        } else {
-               tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
+               tm = kzalloc(sizeof(*tm), GFP_ATOMIC);
                if (!tm)
                        goto out_unlock;
        }
-       write_pnet(&tm->tcpm_net, net);
+       /* Paired with the READ_ONCE() in tm_net() */
+       WRITE_ONCE(tm->tcpm_net, net);
+
        tm->tcpm_saddr = *saddr;
        tm->tcpm_daddr = *daddr;
 
-       tcpm_suck_dst(tm, dst, true);
+       tcpm_suck_dst(tm, dst, reclaim);
 
        if (likely(!reclaim)) {
                tm->tcpm_next = tcp_metrics_hash[hash].chain;
@@ -434,7 +454,7 @@ void tcp_update_metrics(struct sock *sk)
                                               tp->reordering);
                }
        }
-       tm->tcpm_stamp = jiffies;
+       WRITE_ONCE(tm->tcpm_stamp, jiffies);
 out_unlock:
        rcu_read_unlock();
 }
@@ -539,8 +559,6 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
        return ret;
 }
 
-static DEFINE_SEQLOCK(fastopen_seqlock);
-
 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
                            struct tcp_fastopen_cookie *cookie)
 {
@@ -647,7 +665,7 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
        }
 
        if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
-                         jiffies - tm->tcpm_stamp,
+                         jiffies - READ_ONCE(tm->tcpm_stamp),
                          TCP_METRICS_ATTR_PAD) < 0)
                goto nla_put_failure;
 
@@ -658,7 +676,7 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
                if (!nest)
                        goto nla_put_failure;
                for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
-                       u32 val = tm->tcpm_vals[i];
+                       u32 val = tcp_metric_get(tm, i);
 
                        if (!val)
                                continue;
index 42a96b3..abfa860 100644 (file)
 #include <net/sock_reuseport.h>
 #include <net/addrconf.h>
 #include <net/udp_tunnel.h>
+#include <net/gro.h>
 #if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6_stubs.h>
 #endif
@@ -555,10 +556,13 @@ struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
 {
        const struct iphdr *iph = ip_hdr(skb);
        struct net *net = dev_net(skb->dev);
+       int iif, sdif;
+
+       inet_get_iif_sdif(skb, &iif, &sdif);
 
        return __udp4_lib_lookup(net, iph->saddr, sport,
-                                iph->daddr, dport, inet_iif(skb),
-                                inet_sdif(skb), net->ipv4.udp_table, NULL);
+                                iph->daddr, dport, iif,
+                                sdif, net->ipv4.udp_table, NULL);
 }
 
 /* Must be called under rcu_read_lock().
index f402946..0f46b3c 100644 (file)
@@ -609,10 +609,13 @@ static struct sock *udp4_gro_lookup_skb(struct sk_buff *skb, __be16 sport,
 {
        const struct iphdr *iph = skb_gro_network_header(skb);
        struct net *net = dev_net(skb->dev);
+       int iif, sdif;
+
+       inet_get_iif_sdif(skb, &iif, &sdif);
 
        return __udp4_lib_lookup(net, iph->saddr, sport,
-                                iph->daddr, dport, inet_iif(skb),
-                                inet_sdif(skb), net->ipv4.udp_table, NULL);
+                                iph->daddr, dport, iif,
+                                sdif, net->ipv4.udp_table, NULL);
 }
 
 INDIRECT_CALLABLE_SCOPE
index cc3d5ad..67a3b8f 100644 (file)
@@ -1073,7 +1073,7 @@ static int ip6mr_cache_report(const struct mr_table *mrt, struct sk_buff *pkt,
                   And all this only to mangle msg->im6_msgtype and
                   to set msg->im6_mbz to "mbz" :-)
                 */
-               skb_push(skb, -skb_network_offset(pkt));
+               __skb_pull(skb, skb_network_offset(pkt));
 
                skb_push(skb, sizeof(*msg));
                skb_reset_transport_header(skb);
index f804c11..c2c2918 100644 (file)
@@ -120,7 +120,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
        ipcm6_init_sk(&ipc6, np);
        ipc6.sockc.tsflags = sk->sk_tsflags;
-       ipc6.sockc.mark = sk->sk_mark;
+       ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
 
        fl6.flowi6_oif = oif;
 
index ac1cef0..49381f3 100644 (file)
@@ -614,7 +614,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
        skb_reserve(skb, hlen);
 
        skb->protocol = htons(ETH_P_IPV6);
-       skb->priority = sk->sk_priority;
+       skb->priority = READ_ONCE(sk->sk_priority);
        skb->mark = sockc->mark;
        skb->tstamp = sockc->transmit_time;
 
@@ -774,12 +774,12 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
         */
        memset(&fl6, 0, sizeof(fl6));
 
-       fl6.flowi6_mark = sk->sk_mark;
+       fl6.flowi6_mark = READ_ONCE(sk->sk_mark);
        fl6.flowi6_uid = sk->sk_uid;
 
        ipcm6_init(&ipc6);
        ipc6.sockc.tsflags = sk->sk_tsflags;
-       ipc6.sockc.mark = sk->sk_mark;
+       ipc6.sockc.mark = fl6.flowi6_mark;
 
        if (sin6) {
                if (addr_len < SIN6_LEN_RFC2133)
index 64e873f..56a5558 100644 (file)
@@ -2951,7 +2951,8 @@ void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
        if (!oif && skb->dev)
                oif = l3mdev_master_ifindex(skb->dev);
 
-       ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
+       ip6_update_pmtu(skb, sock_net(sk), mtu, oif, READ_ONCE(sk->sk_mark),
+                       sk->sk_uid);
 
        dst = __sk_dst_get(sk);
        if (!dst || !dst->obsolete ||
@@ -3172,8 +3173,8 @@ void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
 
 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
 {
-       ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
-                    sk->sk_uid);
+       ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if,
+                    READ_ONCE(sk->sk_mark), sk->sk_uid);
 }
 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
 
index 4714eb6..6e86721 100644 (file)
@@ -564,8 +564,8 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                opt = ireq->ipv6_opt;
                if (!opt)
                        opt = rcu_dereference(np->opt);
-               err = ip6_xmit(sk, skb, fl6, skb->mark ? : sk->sk_mark, opt,
-                              tclass, sk->sk_priority);
+               err = ip6_xmit(sk, skb, fl6, skb->mark ? : READ_ONCE(sk->sk_mark),
+                              opt, tclass, sk->sk_priority);
                rcu_read_unlock();
                err = net_xmit_eval(err);
        }
@@ -939,7 +939,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
                if (sk->sk_state == TCP_TIME_WAIT)
                        mark = inet_twsk(sk)->tw_mark;
                else
-                       mark = sk->sk_mark;
+                       mark = READ_ONCE(sk->sk_mark);
                skb_set_delivery_time(buff, tcp_transmit_time(sk), true);
        }
        if (txhash) {
@@ -1128,7 +1128,8 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                        tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
                        READ_ONCE(req->ts_recent), sk->sk_bound_dev_if,
                        tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
-                       ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority,
+                       ipv6_get_dsfield(ipv6_hdr(skb)), 0,
+                       READ_ONCE(sk->sk_priority),
                        READ_ONCE(tcp_rsk(req)->txhash));
 }
 
index b7c972a..f787e6b 100644 (file)
@@ -51,6 +51,7 @@
 #include <net/inet6_hashtables.h>
 #include <net/busy_poll.h>
 #include <net/sock_reuseport.h>
+#include <net/gro.h>
 
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -300,10 +301,13 @@ struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
 {
        const struct ipv6hdr *iph = ipv6_hdr(skb);
        struct net *net = dev_net(skb->dev);
+       int iif, sdif;
+
+       inet6_get_iif_sdif(skb, &iif, &sdif);
 
        return __udp6_lib_lookup(net, &iph->saddr, sport,
-                                &iph->daddr, dport, inet6_iif(skb),
-                                inet6_sdif(skb), net->ipv4.udp_table, NULL);
+                                &iph->daddr, dport, iif,
+                                sdif, net->ipv4.udp_table, NULL);
 }
 
 /* Must be called under rcu_read_lock().
@@ -624,7 +628,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        if (type == NDISC_REDIRECT) {
                if (tunnel) {
                        ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
-                                    sk->sk_mark, sk->sk_uid);
+                                    READ_ONCE(sk->sk_mark), sk->sk_uid);
                } else {
                        ip6_sk_redirect(skb, sk);
                }
@@ -1356,7 +1360,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        ipcm6_init(&ipc6);
        ipc6.gso_size = READ_ONCE(up->gso_size);
        ipc6.sockc.tsflags = sk->sk_tsflags;
-       ipc6.sockc.mark = sk->sk_mark;
+       ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
 
        /* destination address check */
        if (sin6) {
index 09fa7a4..6b95ba2 100644 (file)
@@ -118,10 +118,13 @@ static struct sock *udp6_gro_lookup_skb(struct sk_buff *skb, __be16 sport,
 {
        const struct ipv6hdr *iph = skb_gro_network_header(skb);
        struct net *net = dev_net(skb->dev);
+       int iif, sdif;
+
+       inet6_get_iif_sdif(skb, &iif, &sdif);
 
        return __udp6_lib_lookup(net, &iph->saddr, sport,
-                                &iph->daddr, dport, inet6_iif(skb),
-                                inet6_sdif(skb), net->ipv4.udp_table, NULL);
+                                &iph->daddr, dport, iif,
+                                sdif, net->ipv4.udp_table, NULL);
 }
 
 INDIRECT_CALLABLE_SCOPE
index b1623f9..ff78217 100644 (file)
@@ -519,7 +519,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        /* Get and verify the address */
        memset(&fl6, 0, sizeof(fl6));
 
-       fl6.flowi6_mark = sk->sk_mark;
+       fl6.flowi6_mark = READ_ONCE(sk->sk_mark);
        fl6.flowi6_uid = sk->sk_uid;
 
        ipcm6_init(&ipc6);
index 63f7a09..a3f1fe8 100644 (file)
@@ -103,7 +103,7 @@ static void mptcp_sol_socket_sync_intval(struct mptcp_sock *msk, int optname, in
                        break;
                case SO_MARK:
                        if (READ_ONCE(ssk->sk_mark) != sk->sk_mark) {
-                               ssk->sk_mark = sk->sk_mark;
+                               WRITE_ONCE(ssk->sk_mark, sk->sk_mark);
                                sk_dst_reset(ssk);
                        }
                        break;
index 84def74..9ed85be 100644 (file)
@@ -107,7 +107,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
                break;
        case NFT_SOCKET_MARK:
                if (sk_fullsock(sk)) {
-                       *dest = sk->sk_mark;
+                       *dest = READ_ONCE(sk->sk_mark);
                } else {
                        regs->verdict.code = NFT_BREAK;
                        return;
index 7013f55..76e01f2 100644 (file)
@@ -77,7 +77,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
 
                if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
                    transparent && sk_fullsock(sk))
-                       pskb->mark = sk->sk_mark;
+                       pskb->mark = READ_ONCE(sk->sk_mark);
 
                if (sk != skb->sk)
                        sock_gen_put(sk);
@@ -138,7 +138,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
 
                if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
                    transparent && sk_fullsock(sk))
-                       pskb->mark = sk->sk_mark;
+                       pskb->mark = READ_ONCE(sk->sk_mark);
 
                if (sk != skb->sk)
                        sock_gen_put(sk);
index 8e3ddec..a4631cb 100644 (file)
@@ -2050,8 +2050,8 @@ retry:
 
        skb->protocol = proto;
        skb->dev = dev;
-       skb->priority = sk->sk_priority;
-       skb->mark = sk->sk_mark;
+       skb->priority = READ_ONCE(sk->sk_priority);
+       skb->mark = READ_ONCE(sk->sk_mark);
        skb->tstamp = sockc.transmit_time;
 
        skb_setup_tx_timestamp(skb, sockc.tsflags);
@@ -2585,8 +2585,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
 
        skb->protocol = proto;
        skb->dev = dev;
-       skb->priority = po->sk.sk_priority;
-       skb->mark = po->sk.sk_mark;
+       skb->priority = READ_ONCE(po->sk.sk_priority);
+       skb->mark = READ_ONCE(po->sk.sk_mark);
        skb->tstamp = sockc->transmit_time;
        skb_setup_tx_timestamp(skb, sockc->tsflags);
        skb_zcopy_set_nouarg(skb, ph.raw);
@@ -2988,7 +2988,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
                goto out_unlock;
 
        sockcm_init(&sockc, sk);
-       sockc.mark = sk->sk_mark;
+       sockc.mark = READ_ONCE(sk->sk_mark);
        if (msg->msg_controllen) {
                err = sock_cmsg_send(sk, msg, &sockc);
                if (unlikely(err))
@@ -3061,7 +3061,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 
        skb->protocol = proto;
        skb->dev = dev;
-       skb->priority = sk->sk_priority;
+       skb->priority = READ_ONCE(sk->sk_priority);
        skb->mark = sockc.mark;
        skb->tstamp = sockc.transmit_time;
 
index 8da9d03..9f0711d 100644 (file)
@@ -776,7 +776,8 @@ mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
        [TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]    = { .type = NLA_U32 },
 };
 
-static const struct nla_policy cfm_opt_policy[TCA_FLOWER_KEY_CFM_OPT_MAX] = {
+static const struct nla_policy
+cfm_opt_policy[TCA_FLOWER_KEY_CFM_OPT_MAX + 1] = {
        [TCA_FLOWER_KEY_CFM_MD_LEVEL]   = NLA_POLICY_MAX(NLA_U8,
                                                FLOW_DIS_CFM_MDL_MAX),
        [TCA_FLOWER_KEY_CFM_OPCODE]     = { .type = NLA_U8 },
@@ -1709,7 +1710,7 @@ static int fl_set_key_cfm(struct nlattr **tb,
                          struct fl_flow_key *mask,
                          struct netlink_ext_ack *extack)
 {
-       struct nlattr *nla_cfm_opt[TCA_FLOWER_KEY_CFM_OPT_MAX];
+       struct nlattr *nla_cfm_opt[TCA_FLOWER_KEY_CFM_OPT_MAX + 1];
        int err;
 
        if (!tb[TCA_FLOWER_KEY_CFM])
index 8641f80..c49d6af 100644 (file)
@@ -267,7 +267,6 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
                        return -ENOBUFS;
 
                fnew->id = f->id;
-               fnew->res = f->res;
                fnew->ifindex = f->ifindex;
                fnew->tp = f->tp;
 
index d0c5372..1e20bbd 100644 (file)
@@ -513,7 +513,6 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
        if (fold) {
                f->id = fold->id;
                f->iif = fold->iif;
-               f->res = fold->res;
                f->handle = fold->handle;
 
                f->tp = fold->tp;
index 5abf31e..da4c179 100644 (file)
@@ -826,7 +826,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
 
        new->ifindex = n->ifindex;
        new->fshift = n->fshift;
-       new->res = n->res;
        new->flags = n->flags;
        RCU_INIT_POINTER(new->ht_down, ht);
 
@@ -1024,18 +1023,62 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
                return -EINVAL;
        }
 
+       /* At this point, we need to derive the new handle that will be used to
+        * uniquely map the identity of this table match entry. The
+        * identity of the entry that we need to construct is 32 bits made of:
+        *     htid(12b):bucketid(8b):node/entryid(12b)
+        *
+        * At this point _we have the table(ht)_ in which we will insert this
+        * entry. We carry the table's id in variable "htid".
+        * Note that earlier code picked the ht selection either by a) the user
+        * providing the htid specified via TCA_U32_HASH attribute or b) when
+        * no such attribute is passed then the root ht, is default to at ID
+        * 0x[800][00][000]. Rule: the root table has a single bucket with ID 0.
+        * If OTOH the user passed us the htid, they may also pass a bucketid of
+        * choice. 0 is fine. For example a user htid is 0x[600][01][000] it is
+        * indicating hash bucketid of 1. Rule: the entry/node ID _cannot_ be
+        * passed via the htid, so even if it was non-zero it will be ignored.
+        *
+        * We may also have a handle, if the user passed one. The handle also
+        * carries the same addressing of htid(12b):bucketid(8b):node/entryid(12b).
+        * Rule: the bucketid on the handle is ignored even if one was passed;
+        * rather the value on "htid" is always assumed to be the bucketid.
+        */
        if (handle) {
+               /* Rule: The htid from handle and tableid from htid must match */
                if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
                        NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
                        return -EINVAL;
                }
-               handle = htid | TC_U32_NODE(handle);
-               err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle,
-                                   GFP_KERNEL);
-               if (err)
-                       return err;
-       } else
+               /* Ok, so far we have a valid htid(12b):bucketid(8b) but we
+                * need to finalize the table entry identification with the last
+                * part - the node/entryid(12b)). Rule: Nodeid _cannot be 0_ for
+                * entries. Rule: nodeid of 0 is reserved only for tables(see
+                * earlier code which processes TC_U32_DIVISOR attribute).
+                * Rule: The nodeid can only be derived from the handle (and not
+                * htid).
+                * Rule: if the handle specified zero for the node id example
+                * 0x60000000, then pick a new nodeid from the pool of IDs
+                * this hash table has been allocating from.
+                * If OTOH it is specified (i.e for example the user passed a
+                * handle such as 0x60000123), then we use it generate our final
+                * handle which is used to uniquely identify the match entry.
+                */
+               if (!TC_U32_NODE(handle)) {
+                       handle = gen_new_kid(ht, htid);
+               } else {
+                       handle = htid | TC_U32_NODE(handle);
+                       err = idr_alloc_u32(&ht->handle_idr, NULL, &handle,
+                                           handle, GFP_KERNEL);
+                       if (err)
+                               return err;
+               }
+       } else {
+               /* The user did not give us a handle; lets just generate one
+                * from the table's pool of nodeids.
+                */
                handle = gen_new_kid(ht, htid);
+       }
 
        if (tb[TCA_U32_SEL] == NULL) {
                NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
index af85a73..6fdba06 100644 (file)
@@ -568,7 +568,7 @@ META_COLLECTOR(int_sk_rcvtimeo)
                *err = -1;
                return;
        }
-       dst->value = sk->sk_rcvtimeo / HZ;
+       dst->value = READ_ONCE(sk->sk_rcvtimeo) / HZ;
 }
 
 META_COLLECTOR(int_sk_sndtimeo)
@@ -579,7 +579,7 @@ META_COLLECTOR(int_sk_sndtimeo)
                *err = -1;
                return;
        }
-       dst->value = sk->sk_sndtimeo / HZ;
+       dst->value = READ_ONCE(sk->sk_sndtimeo) / HZ;
 }
 
 META_COLLECTOR(int_sk_sendmsg_off)
index 717ae51..8c9cfff 100644 (file)
@@ -1015,6 +1015,11 @@ static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
                                                              TC_FP_PREEMPTIBLE),
 };
 
+static struct netlink_range_validation_signed taprio_cycle_time_range = {
+       .min = 0,
+       .max = INT_MAX,
+};
+
 static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
        [TCA_TAPRIO_ATTR_PRIOMAP]              = {
                .len = sizeof(struct tc_mqprio_qopt)
@@ -1023,7 +1028,8 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
        [TCA_TAPRIO_ATTR_SCHED_BASE_TIME]            = { .type = NLA_S64 },
        [TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]         = { .type = NLA_NESTED },
        [TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
-       [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
+       [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           =
+               NLA_POLICY_FULL_RANGE_SIGNED(NLA_S64, &taprio_cycle_time_range),
        [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
        [TCA_TAPRIO_ATTR_FLAGS]                      = { .type = NLA_U32 },
        [TCA_TAPRIO_ATTR_TXTIME_DELAY]               = { .type = NLA_U32 },
@@ -1159,6 +1165,11 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
                        return -EINVAL;
                }
 
+               if (cycle < 0 || cycle > INT_MAX) {
+                       NL_SET_ERR_MSG(extack, "'cycle_time' is too big");
+                       return -EINVAL;
+               }
+
                new->cycle_time = cycle;
        }
 
@@ -1347,7 +1358,7 @@ static void setup_txtime(struct taprio_sched *q,
                         struct sched_gate_list *sched, ktime_t base)
 {
        struct sched_entry *entry;
-       u32 interval = 0;
+       u64 interval = 0;
 
        list_for_each_entry(entry, &sched->entries, list) {
                entry->next_txtime = ktime_add_ns(base, interval);
index a7f887d..0c013d2 100644 (file)
@@ -445,7 +445,7 @@ static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
        nsk->sk_rcvbuf = osk->sk_rcvbuf;
        nsk->sk_sndtimeo = osk->sk_sndtimeo;
        nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
-       nsk->sk_mark = osk->sk_mark;
+       nsk->sk_mark = READ_ONCE(osk->sk_mark);
        nsk->sk_priority = osk->sk_priority;
        nsk->sk_rcvlowat = osk->sk_rcvlowat;
        nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
index 7858521..86930a8 100644 (file)
@@ -790,7 +790,7 @@ static int unix_set_peek_off(struct sock *sk, int val)
        if (mutex_lock_interruptible(&u->iolock))
                return -EINTR;
 
-       sk->sk_peek_off = val;
+       WRITE_ONCE(sk->sk_peek_off, val);
        mutex_unlock(&u->iolock);
 
        return 0;
index 8bf00ca..0cf1ce7 100644 (file)
@@ -657,7 +657,7 @@ static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
 
        ret = cfg80211_calc_short_ssid(ies, &ssid_elem, &s_ssid_tmp);
        if (ret)
-               return ret;
+               return 0;
 
        for_each_element_id(elem, WLAN_EID_REDUCED_NEIGHBOR_REPORT,
                            ies->data, ies->len) {
index 31dca4e..b89adb5 100644 (file)
@@ -505,7 +505,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
 
        skb->dev = dev;
        skb->priority = xs->sk.sk_priority;
-       skb->mark = xs->sk.sk_mark;
+       skb->mark = READ_ONCE(xs->sk.sk_mark);
        skb_shinfo(skb)->destructor_arg = (void *)(long)desc->addr;
        skb->destructor = xsk_destruct_skb;
 
index e7617c9..d6b4057 100644 (file)
@@ -2250,7 +2250,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
 
                match = xfrm_selector_match(&pol->selector, fl, family);
                if (match) {
-                       if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
+                       if ((READ_ONCE(sk->sk_mark) & pol->mark.m) != pol->mark.v ||
                            pol->if_id != if_id) {
                                pol = NULL;
                                goto out;
index e721290..4467979 100755 (executable)
@@ -164,7 +164,7 @@ def recursive_file_lookup(path, file_map):
 def get_all_devices_test_status(file_map):
 
         for device in file_map:
-                if (get_test_state(locate_state(device, file_map)) is 1):
+                if (get_test_state(locate_state(device, file_map)) == 1):
                         print("Testing = ON for: {}"
                               .format(device.split("/")[5]))
                 else:
@@ -203,7 +203,7 @@ def write_test_files(path, value):
 def set_test_state(state_path, state_value, quiet):
 
         write_test_files(state_path, state_value)
-        if (get_test_state(state_path) is 1):
+        if (get_test_state(state_path) == 1):
                 if (not quiet):
                         print("Testing = ON for device: {}"
                               .format(state_path.split("/")[5]))
index 561de0c..512a8f1 100644 (file)
@@ -54,10 +54,11 @@ double perf_pmu__cpu_slots_per_cycle(void)
                perf_pmu__pathname_scnprintf(path, sizeof(path),
                                             pmu->name, "caps/slots");
                /*
-                * The value of slots is not greater than 32 bits, but sysfs__read_int
-                * can't read value with 0x prefix, so use sysfs__read_ull instead.
+                * The value of slots is not greater than 32 bits, but
+                * filename__read_int can't read value with 0x prefix,
+                * so use filename__read_ull instead.
                 */
-               sysfs__read_ull(path, &slots);
+               filename__read_ull(path, &slots);
        }
 
        return slots ? (double)slots : NAN;
index b7223fe..5f3edb3 100644 (file)
@@ -250,6 +250,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
        if (!chain || chain->nr < 3)
                return skip_slot;
 
+       addr_location__init(&al);
        ip = chain->ips[1];
 
        thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
@@ -259,6 +260,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
 
        if (!dso) {
                pr_debug("%" PRIx64 " dso is NULL\n", ip);
+               addr_location__exit(&al);
                return skip_slot;
        }
 
@@ -279,5 +281,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
                 */
                skip_slot = 3;
        }
+
+       addr_location__exit(&al);
        return skip_slot;
 }
index b2f8284..658fb95 100644 (file)
@@ -1631,6 +1631,16 @@ static bool test__pmu_cpu_valid(void)
        return !!perf_pmus__find("cpu");
 }
 
+static bool test__pmu_cpu_event_valid(void)
+{
+       struct perf_pmu *pmu = perf_pmus__find("cpu");
+
+       if (!pmu)
+               return false;
+
+       return perf_pmu__has_format(pmu, "event");
+}
+
 static bool test__intel_pt_valid(void)
 {
        return !!perf_pmus__find("intel_pt");
@@ -2179,7 +2189,7 @@ static const struct evlist_test test__events_pmu[] = {
        },
        {
                .name  = "cpu/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks',period=0x1,event=0x2/ukp",
-               .valid = test__pmu_cpu_valid,
+               .valid = test__pmu_cpu_event_valid,
                .check = test__checkevent_complex_name,
                /* 3 */
        },
index 00d2e0e..319f36e 100755 (executable)
@@ -4,6 +4,12 @@
 
 set -e
 
+# skip if there's no gcc
+if ! [ -x "$(command -v gcc)" ]; then
+        echo "failed: no gcc compiler"
+        exit 2
+fi
+
 temp_dir=$(mktemp -d /tmp/perf-uprobe-different-cu-sh.XXXXXXXXXX)
 
 cleanup()
@@ -11,7 +17,7 @@ cleanup()
        trap - EXIT TERM INT
        if [[ "${temp_dir}" =~ ^/tmp/perf-uprobe-different-cu-sh.*$ ]]; then
                echo "--- Cleaning up ---"
-               perf probe -x ${temp_dir}/testfile -d foo
+               perf probe -x ${temp_dir}/testfile -d foo || true
                rm -f "${temp_dir}/"*
                rmdir "${temp_dir}"
        fi
index acde097..c9ec0ca 100644 (file)
@@ -2100,16 +2100,16 @@ __weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
        return lhs->core.idx - rhs->core.idx;
 }
 
-static int evlist__cmp(void *state, const struct list_head *l, const struct list_head *r)
+static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r)
 {
        const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
        const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
        const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
        const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
-       int *leader_idx = state;
-       int lhs_leader_idx = *leader_idx, rhs_leader_idx = *leader_idx, ret;
+       int *force_grouped_idx = _fg_idx;
+       int lhs_sort_idx, rhs_sort_idx, ret;
        const char *lhs_pmu_name, *rhs_pmu_name;
-       bool lhs_has_group = false, rhs_has_group = false;
+       bool lhs_has_group, rhs_has_group;
 
        /*
         * First sort by grouping/leader. Read the leader idx only if the evsel
@@ -2121,15 +2121,25 @@ static int evlist__cmp(void *state, const struct list_head *l, const struct list
         */
        if (lhs_core->leader != lhs_core || lhs_core->nr_members > 1) {
                lhs_has_group = true;
-               lhs_leader_idx = lhs_core->leader->idx;
+               lhs_sort_idx = lhs_core->leader->idx;
+       } else {
+               lhs_has_group = false;
+               lhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)
+                       ? *force_grouped_idx
+                       : lhs_core->idx;
        }
        if (rhs_core->leader != rhs_core || rhs_core->nr_members > 1) {
                rhs_has_group = true;
-               rhs_leader_idx = rhs_core->leader->idx;
+               rhs_sort_idx = rhs_core->leader->idx;
+       } else {
+               rhs_has_group = false;
+               rhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)
+                       ? *force_grouped_idx
+                       : rhs_core->idx;
        }
 
-       if (lhs_leader_idx != rhs_leader_idx)
-               return lhs_leader_idx - rhs_leader_idx;
+       if (lhs_sort_idx != rhs_sort_idx)
+               return lhs_sort_idx - rhs_sort_idx;
 
        /* Group by PMU if there is a group. Groups can't span PMUs. */
        if (lhs_has_group && rhs_has_group) {
@@ -2146,10 +2156,10 @@ static int evlist__cmp(void *state, const struct list_head *l, const struct list
 
 static int parse_events__sort_events_and_fix_groups(struct list_head *list)
 {
-       int idx = 0, unsorted_idx = -1;
+       int idx = 0, force_grouped_idx = -1;
        struct evsel *pos, *cur_leader = NULL;
        struct perf_evsel *cur_leaders_grp = NULL;
-       bool idx_changed = false;
+       bool idx_changed = false, cur_leader_force_grouped = false;
        int orig_num_leaders = 0, num_leaders = 0;
        int ret;
 
@@ -2174,12 +2184,14 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
                 */
                pos->core.idx = idx++;
 
-               if (unsorted_idx == -1 && pos == pos_leader && pos->core.nr_members < 2)
-                       unsorted_idx = pos->core.idx;
+               /* Remember an index to sort all forced grouped events together to. */
+               if (force_grouped_idx == -1 && pos == pos_leader && pos->core.nr_members < 2 &&
+                   arch_evsel__must_be_in_group(pos))
+                       force_grouped_idx = pos->core.idx;
        }
 
        /* Sort events. */
-       list_sort(&unsorted_idx, list, evlist__cmp);
+       list_sort(&force_grouped_idx, list, evlist__cmp);
 
        /*
         * Recompute groups, splitting for PMUs and adding groups for events
@@ -2189,8 +2201,9 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
        list_for_each_entry(pos, list, core.node) {
                const struct evsel *pos_leader = evsel__leader(pos);
                const char *pos_pmu_name = pos->group_pmu_name;
-               const char *cur_leader_pmu_name, *pos_leader_pmu_name;
-               bool force_grouped = arch_evsel__must_be_in_group(pos);
+               const char *cur_leader_pmu_name;
+               bool pos_force_grouped = force_grouped_idx != -1 &&
+                       arch_evsel__must_be_in_group(pos);
 
                /* Reset index and nr_members. */
                if (pos->core.idx != idx)
@@ -2206,7 +2219,8 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
                        cur_leader = pos;
 
                cur_leader_pmu_name = cur_leader->group_pmu_name;
-               if ((cur_leaders_grp != pos->core.leader && !force_grouped) ||
+               if ((cur_leaders_grp != pos->core.leader &&
+                    (!pos_force_grouped || !cur_leader_force_grouped)) ||
                    strcmp(cur_leader_pmu_name, pos_pmu_name)) {
                        /* Event is for a different group/PMU than last. */
                        cur_leader = pos;
@@ -2216,14 +2230,14 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
                         * group.
                         */
                        cur_leaders_grp = pos->core.leader;
-               }
-               pos_leader_pmu_name = pos_leader->group_pmu_name;
-               if (strcmp(pos_leader_pmu_name, pos_pmu_name) || force_grouped) {
                        /*
-                        * Event's PMU differs from its leader's. Groups can't
-                        * span PMUs, so update leader from the group/PMU
-                        * tracker.
+                        * Avoid forcing events into groups with events that
+                        * don't need to be in the group.
                         */
+                       cur_leader_force_grouped = pos_force_grouped;
+               }
+               if (pos_leader != cur_leader) {
+                       /* The leader changed so update it. */
                        evsel__set_leader(pos, cur_leader);
                }
        }
index 7f984a7..28380e7 100644 (file)
@@ -1440,6 +1440,17 @@ void perf_pmu__del_formats(struct list_head *formats)
        }
 }
 
+bool perf_pmu__has_format(const struct perf_pmu *pmu, const char *name)
+{
+       struct perf_pmu_format *format;
+
+       list_for_each_entry(format, &pmu->format, list) {
+               if (!strcmp(format->name, name))
+                       return true;
+       }
+       return false;
+}
+
 bool is_pmu_core(const char *name)
 {
        return !strcmp(name, "cpu") || !strcmp(name, "cpum_cf") || is_sysfs_pmu_core(name);
index 203b928..6b414ce 100644 (file)
@@ -234,6 +234,7 @@ int perf_pmu__new_format(struct list_head *list, char *name,
 void perf_pmu__set_format(unsigned long *bits, long from, long to);
 int perf_pmu__format_parse(int dirfd, struct list_head *head);
 void perf_pmu__del_formats(struct list_head *formats);
+bool perf_pmu__has_format(const struct perf_pmu *pmu, const char *name);
 
 bool is_pmu_core(const char *name);
 bool perf_pmu__supports_legacy_cache(const struct perf_pmu *pmu);
index 3cd9de4..c58ba9f 100644 (file)
@@ -152,16 +152,14 @@ static void pmu_read_sysfs(bool core_only)
        }
 
        closedir(dir);
-       if (core_only) {
-               if (!list_empty(&core_pmus))
-                       read_sysfs_core_pmus = true;
-               else {
-                       if (perf_pmu__create_placeholder_core_pmu(&core_pmus))
-                               read_sysfs_core_pmus = true;
-               }
-       } else {
+       if (list_empty(&core_pmus)) {
+               if (!perf_pmu__create_placeholder_core_pmu(&core_pmus))
+                       pr_err("Failure to set up any core PMUs\n");
+       }
+       if (!list_empty(&core_pmus)) {
                read_sysfs_core_pmus = true;
-               read_sysfs_all_pmus = true;
+               if (!core_only)
+                       read_sysfs_all_pmus = true;
        }
 }
 
index 0e04f9f..a148181 100644 (file)
@@ -159,7 +159,7 @@ void create_clients(struct __test_metadata *_metadata,
                /* Make sure SYN will be processed on the i-th CPU
                 * and finally distributed to the i-th listener.
                 */
-               sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
+               ret = sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
                ASSERT_EQ(ret, 0);
 
                for (j = 0; j < CLIENT_PER_SERVER; j++) {
index 5cbc392..2c0d2b1 100644 (file)
@@ -1,6 +1,4 @@
 // SPDX-License-Identifier: GPL-2.0-only
-#include <sys/prctl.h>
-
 #define THIS_PROGRAM "./vstate_exec_nolibc"
 
 int main(int argc, char **argv)
index a444553..08d4861 100644 (file)
         "teardown": [
             "echo \"1\" > /sys/bus/netdevsim/del_device"
         ]
+    },
+    {
+        "id": "3e1e",
+        "name": "Add taprio Qdisc with an invalid cycle-time",
+        "category": [
+            "qdisc",
+            "taprio"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "echo \"1 1 8\" > /sys/bus/netdevsim/new_device",
+            "$TC qdisc add dev $ETH root handle 1: taprio num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@0 1@0 base-time 1000000000 sched-entry S 01 300000 flags 0x1 clockid CLOCK_TAI cycle-time 4294967296 || /bin/true",
+            "$IP link set dev $ETH up",
+            "$IP addr add 10.10.10.10/24 dev $ETH"
+        ],
+        "cmdUnderTest": "/bin/true",
+        "expExitCode": "0",
+        "verifyCmd": "$TC qdisc show dev $ETH",
+        "matchPattern": "qdisc taprio 1: root refcnt",
+        "matchCount": "0",
+        "teardown": [
+            "echo \"1\" > /sys/bus/netdevsim/del_device"
+        ]
     }
 ]
index 43a254f..21a98ba 100644 (file)
@@ -8,5 +8,5 @@ vsock_perf: vsock_perf.o
 CFLAGS += -g -O2 -Werror -Wall -I. -I../../include -I../../../usr/include -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -D_GNU_SOURCE
 .PHONY: all test clean
 clean:
-       ${RM} *.o *.d vsock_test vsock_diag_test
+       ${RM} *.o *.d vsock_test vsock_diag_test vsock_perf
 -include *.d