Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
author     David S. Miller <davem@davemloft.net>  Mon, 28 Jun 2021 22:28:03 +0000 (15:28 -0700)
committer  David S. Miller <davem@davemloft.net>  Mon, 28 Jun 2021 22:28:03 +0000 (15:28 -0700)
Daniel Borkmann says:

====================
pull-request: bpf-next 2021-06-28

The following pull-request contains BPF updates for your *net-next* tree.

We've added 37 non-merge commits during the last 12 days which contain
a total of 56 files changed, 394 insertions(+), 380 deletions(-).

The main changes are:

1) XDP driver RCU cleanups, from Toke Høiland-Jørgensen and Paul E. McKenney.

2) Fix bpf_skb_change_proto() IPv4/v6 GSO handling, from Maciej Żenczykowski (see the sketch below).

3) Fix false positive kmemleak report for BPF ringbuf alloc, from Rustam Kovhaev.

4) Fix x86 JIT's extable offset calculation for PROBE_LDX NULL, from Ravi Bangoria.

5) Enable libbpf fallback probing with tracing under RHEL7, from Jonathan Edwards.

6) Clean up x86 JIT to remove unused cnt tracking from EMIT macro, from Jiri Olsa.

7) Netlink cleanups for libbpf to please Coverity, from Kumar Kartikeya Dwivedi.

8) Allow retrieving the ancestor cgroup id in tracing programs, from Namhyung Kim (see the sketch below).

9) Fix lirc BPF program query to use user-provided prog_cnt, from Sean Young.

10) Add initial libbpf doc including generated kdoc for its API, from Grant Seltzer.

11) Make xdp_rxq_info_unreg_mem_model() more robust, from Jakub Kicinski.

12) Fix up bpfilter startup log-level to info level, from Gary Lin.
====================
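
For item 2, a minimal sketch of the kind of tc BPF program that calls
bpf_skb_change_proto(), the helper whose IPv4/IPv6 GSO handling is
fixed here. This is illustrative only, not code from the series; the
section and function names are assumptions, and a real program must
rewrite the packet headers itself after the call:

  // SPDX-License-Identifier: GPL-2.0
  /* Hypothetical tc program: flip an skb's metadata from IPv4 to IPv6
   * framing. bpf_skb_change_proto() only adjusts headroom and skb
   * bookkeeping (including the GSO state this series fixes).
   */
  #include <linux/bpf.h>
  #include <linux/if_ether.h>
  #include <linux/pkt_cls.h>
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_endian.h>

  SEC("tc")
  int demo_to_ipv6(struct __sk_buff *skb)
  {
          /* Returns 0 on success, a negative errno otherwise. */
          if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0))
                  return TC_ACT_SHOT;
          return TC_ACT_OK;
  }

  char _license[] SEC("license") = "GPL";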

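Likewise for item 8, a sketch of a tracing program using
bpf_get_current_ancestor_cgroup_id(), which this series makes callable
from tracing program types. The tracepoint and all names here are
illustrative assumptions, not taken from the commits:

  // SPDX-License-Identifier: GPL-2.0
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  SEC("tracepoint/syscalls/sys_enter_openat")
  int trace_openat(void *ctx)
  {
          /* cgroup id of the current task's ancestor at level 1 in the
           * default (v2) hierarchy; level 0 would be the root cgroup.
           */
          __u64 cgid = bpf_get_current_ancestor_cgroup_id(1);

          bpf_printk("openat from cgroup %llu", cgid);
          return 0;
  }

  char _license[] SEC("license") = "GPL";
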
Signed-off-by: David S. Miller <davem@davemloft.net>
1351 files changed:
.clang-format
.mailmap
Documentation/devicetree/bindings/connector/usb-connector.yaml
Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml
Documentation/devicetree/bindings/i2c/i2c-mpc.yaml
Documentation/devicetree/bindings/media/renesas,drif.yaml
Documentation/devicetree/bindings/net/ingenic,mac.yaml
Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/net/qcom,ipa.yaml
Documentation/devicetree/bindings/net/snps,dwmac.yaml
Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
Documentation/driver-api/usb/usb.rst
Documentation/firmware-guide/acpi/dsd/phy.rst
Documentation/firmware-guide/acpi/index.rst
Documentation/networking/device_drivers/ethernet/google/gve.rst
Documentation/networking/dsa/sja1105.rst
Documentation/networking/ethtool-netlink.rst
Documentation/networking/ip-sysctl.rst
Documentation/networking/mptcp-sysctl.rst
Documentation/networking/nf_conntrack-sysctl.rst
Documentation/userspace-api/seccomp_filter.rst
Documentation/virt/kvm/mmu.rst
Documentation/virt/kvm/vcpu-requests.rst
Documentation/vm/slub.rst
MAINTAINERS
Makefile
arch/alpha/include/uapi/asm/socket.h
arch/arc/include/uapi/asm/sigcontext.h
arch/arc/kernel/signal.c
arch/arc/kernel/vmlinux.lds.S
arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
arch/arm/boot/dts/imx6q-dhcom-som.dtsi
arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
arch/arm/boot/dts/imx7d-meerkat96.dts
arch/arm/boot/dts/imx7d-pico.dtsi
arch/arm/include/asm/cpuidle.h
arch/arm/mach-imx/pm-imx27.c
arch/arm/mach-omap1/board-ams-delta.c
arch/arm/mach-omap1/board-h2.c
arch/arm/mach-omap1/pm.c
arch/arm/mach-omap2/board-n8x0.c
arch/arm64/Kconfig.platforms
arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
arch/arm64/boot/dts/microchip/sparx5.dtsi
arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi
arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi
arch/arm64/boot/dts/ti/k3-am64-main.dtsi
arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi
arch/arm64/boot/dts/ti/k3-am65-main.dtsi
arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
arch/arm64/boot/dts/ti/k3-am654-base-board.dts
arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
arch/arm64/include/asm/barrier.h
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/exception.c
arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/hyp/nvhe/setup.c
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/vhe/switch.c
arch/arm64/kvm/mmu.c
arch/arm64/kvm/reset.c
arch/arm64/kvm/sys_regs.c
arch/arm64/mm/mmu.c
arch/mips/alchemy/board-xxs1500.c
arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
arch/mips/boot/dts/loongson/ls7a-pch.dtsi
arch/mips/include/asm/mips-boards/launch.h
arch/mips/include/uapi/asm/socket.h
arch/mips/lib/mips-atomic.c
arch/mips/mm/cache.c
arch/mips/ralink/of.c
arch/parisc/include/uapi/asm/socket.h
arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/include/asm/pte-walk.h
arch/powerpc/kernel/eeh.c
arch/powerpc/kernel/io-workarounds.c
arch/powerpc/kernel/iommu.c
arch/powerpc/kernel/kprobes.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rm_mmu.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/riscv/Kconfig
arch/riscv/Makefile
arch/riscv/boot/dts/microchip/Makefile
arch/riscv/boot/dts/sifive/Makefile
arch/riscv/errata/sifive/Makefile
arch/riscv/include/asm/alternative-macros.h
arch/riscv/include/asm/kexec.h
arch/riscv/kernel/machine_kexec.c
arch/riscv/kernel/probes/kprobes.c
arch/riscv/kernel/setup.c
arch/riscv/kernel/stacktrace.c
arch/riscv/kernel/traps.c
arch/riscv/kernel/vmlinux-xip.lds.S
arch/riscv/mm/init.c
arch/sparc/include/uapi/asm/socket.h
arch/x86/Makefile
arch/x86/events/intel/uncore_snbep.c
arch/x86/include/asm/apic.h
arch/x86/include/asm/disabled-features.h
arch/x86/include/asm/fpu/api.h
arch/x86/include/asm/fpu/internal.h
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/thermal.h
arch/x86/kernel/alternative.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/cpu/perfctr-watchdog.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/setup.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/emulate.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/kvm_emulate.h
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/trace.h
arch/x86/kvm/vmx/capabilities.h
arch/x86/kvm/vmx/posted_intr.c
arch/x86/kvm/vmx/posted_intr.h
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/fault.c
arch/x86/mm/mem_encrypt_identity.c
arch/x86/pci/fixup.c
arch/x86/platform/efi/quirks.c
arch/x86/realmode/init.c
crypto/async_tx/async_xor.c
drivers/acpi/acpi_apd.c
drivers/acpi/acpica/utdelete.c
drivers/acpi/bus.c
drivers/acpi/internal.h
drivers/acpi/power.c
drivers/acpi/scan.c
drivers/acpi/sleep.c
drivers/base/core.c
drivers/base/memory.c
drivers/block/loop.c
drivers/block/loop.h
drivers/bluetooth/btusb.c
drivers/bus/mhi/pci_generic.c
drivers/bus/ti-sysc.c
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/cppc_cpufreq.c
drivers/dma/Kconfig
drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
drivers/dma/idxd/cdev.c
drivers/dma/idxd/init.c
drivers/dma/ipu/ipu_irq.c
drivers/dma/mediatek/mtk-uart-apdma.c
drivers/dma/pl330.c
drivers/dma/qcom/Kconfig
drivers/dma/sf-pdma/Kconfig
drivers/dma/sh/rcar-dmac.c
drivers/dma/ste_dma40.c
drivers/dma/stm32-mdma.c
drivers/dma/xilinx/xilinx_dpdma.c
drivers/dma/xilinx/zynqmp_dma.c
drivers/firmware/efi/cper.c
drivers/firmware/efi/fdtparams.c
drivers/firmware/efi/libstub/file.c
drivers/firmware/efi/memattr.c
drivers/gpio/gpio-wcd934x.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/drm_auth.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/i915/Kconfig
drivers/gpu/drm/i915/display/intel_dp_link_training.c
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_mm.c
drivers/gpu/drm/i915/selftests/i915_request.c
drivers/gpu/drm/mcde/mcde_dsi.c
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.h
drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
drivers/gpu/drm/tegra/drm.h
drivers/gpu/drm/tegra/hub.c
drivers/gpu/drm/tegra/sor.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_device.c
drivers/gpu/drm/vc4/vc4_kms.c
drivers/gpu/host1x/bus.c
drivers/hid/Kconfig
drivers/hid/Makefile
drivers/hid/amd-sfh-hid/amd_sfh_client.c
drivers/hid/amd-sfh-hid/amd_sfh_hid.c
drivers/hid/hid-a4tech.c
drivers/hid/hid-asus.c
drivers/hid/hid-core.c
drivers/hid/hid-debug.c
drivers/hid/hid-ft260.c
drivers/hid/hid-gt683r.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-magicmouse.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-quirks.c
drivers/hid/hid-semitek.c [new file with mode: 0644]
drivers/hid/hid-sensor-custom.c
drivers/hid/hid-sensor-hub.c
drivers/hid/hid-thrustmaster.c
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/hid/intel-ish-hid/ipc/hw-ish.h
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hid/surface-hid/surface_hid_core.c
drivers/hid/usbhid/hid-core.c
drivers/hid/usbhid/hid-pidff.c
drivers/hwmon/corsair-psu.c
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/pmbus/fsp-3y.c
drivers/hwmon/pmbus/isl68137.c
drivers/hwmon/pmbus/q54sj108a2.c
drivers/hwmon/scpi-hwmon.c
drivers/hwmon/tps23861.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-ali1563.c
drivers/i2c/busses/i2c-altera.c
drivers/i2c/busses/i2c-cadence.c
drivers/i2c/busses/i2c-designware-master.c
drivers/i2c/busses/i2c-eg20t.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-icy.c
drivers/i2c/busses/i2c-mpc.c
drivers/i2c/busses/i2c-mt65xx.c
drivers/i2c/busses/i2c-nomadik.c
drivers/i2c/busses/i2c-ocores.c
drivers/i2c/busses/i2c-pnx.c
drivers/i2c/busses/i2c-qcom-geni.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/i2c/busses/i2c-sh_mobile.c
drivers/i2c/busses/i2c-st.c
drivers/i2c/busses/i2c-stm32f4.c
drivers/i2c/busses/i2c-tegra-bpmp.c
drivers/i2c/muxes/i2c-arb-gpio-challenge.c
drivers/iio/adc/ad7124.c
drivers/iio/adc/ad7192.c
drivers/iio/adc/ad7768-1.c
drivers/iio/adc/ad7793.c
drivers/iio/adc/ad7923.c
drivers/iio/dac/ad5770r.c
drivers/iio/gyro/fxas21002c_core.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/doorbell.c
drivers/infiniband/hw/mlx5/fs.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/ulp/ipoib/ipoib_netlink.c
drivers/interconnect/qcom/bcm-voter.c
drivers/iommu/amd/iommu.c
drivers/iommu/intel/dmar.c
drivers/iommu/intel/iommu.c
drivers/iommu/intel/pasid.c
drivers/iommu/virtio-iommu.c
drivers/md/bcache/bcache.h
drivers/md/bcache/request.c
drivers/md/bcache/stats.c
drivers/md/bcache/stats.h
drivers/md/bcache/sysfs.c
drivers/md/dm-snap.c
drivers/md/dm-verity-verify-sig.c
drivers/md/raid5.c
drivers/misc/cardreader/rtl8411.c
drivers/misc/cardreader/rts5209.c
drivers/misc/cardreader/rts5227.c
drivers/misc/cardreader/rts5228.c
drivers/misc/cardreader/rts5229.c
drivers/misc/cardreader/rts5249.c
drivers/misc/cardreader/rts5260.c
drivers/misc/cardreader/rts5261.c
drivers/misc/cardreader/rtsx_pcr.c
drivers/misc/kgdbts.c
drivers/misc/mei/interrupt.c
drivers/mmc/host/renesas_sdhi_core.c
drivers/net/bareudp.c
drivers/net/caif/caif_serial.c
drivers/net/can/usb/mcba_usb.c
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/sja1105/Kconfig
drivers/net/dsa/sja1105/sja1105.h
drivers/net/dsa/sja1105/sja1105_clocking.c
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/dsa/sja1105/sja1105_spi.c
drivers/net/dsa/sja1105/sja1105_static_config.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bnxt/Makefile
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c [new file with mode: 0644]
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h [new file with mode: 0644]
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/ec_bhf.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/google/Kconfig
drivers/net/ethernet/google/gve/Makefile
drivers/net/ethernet/google/gve/gve.h
drivers/net/ethernet/google/gve/gve_adminq.c
drivers/net/ethernet/google/gve/gve_adminq.h
drivers/net/ethernet/google/gve/gve_desc_dqo.h [new file with mode: 0644]
drivers/net/ethernet/google/gve/gve_dqo.h [new file with mode: 0644]
drivers/net/ethernet/google/gve/gve_ethtool.c
drivers/net/ethernet/google/gve/gve_main.c
drivers/net/ethernet/google/gve/gve_rx.c
drivers/net/ethernet/google/gve/gve_rx_dqo.c [new file with mode: 0644]
drivers/net/ethernet/google/gve/gve_tx.c
drivers/net/ethernet/google/gve/gve_tx_dqo.c [new file with mode: 0644]
drivers/net/ethernet/google/gve/gve_utils.c [new file with mode: 0644]
drivers/net/ethernet/google/gve/gve_utils.h [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/iavf/iavf_common.c
drivers/net/ethernet/intel/iavf/iavf_type.h
drivers/net/ethernet/intel/ice/ice_dcb_lib.c
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_ptp.c
drivers/net/ethernet/intel/ice/ice_ptp.h
drivers/net/ethernet/intel/ice/ice_sched.c
drivers/net/ethernet/intel/ice/ice_trace.h [new file with mode: 0644]
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_txrx_lib.c
drivers/net/ethernet/intel/ice/ice_txrx_lib.h
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/lantiq_xrx200.c
drivers/net/ethernet/marvell/mvmdio.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/octeontx2/af/mbox.h
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/mellanox/Kconfig
drivers/net/ethernet/mellanox/Makefile
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/fw.h
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mr.c
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
drivers/net/ethernet/mellanox/mlx5/core/rdma.c
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
drivers/net/ethernet/mellanox/mlx5/core/transobj.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/mellanox/mlxbf_gige/Kconfig [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxbf_gige/Makefile [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_gpio.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_tx.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/core_env.c
drivers/net/ethernet/mellanox/mlxsw/core_env.h
drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
drivers/net/ethernet/mellanox/mlxsw/minimal.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/microchip/Kconfig
drivers/net/ethernet/microchip/Makefile
drivers/net/ethernet/microchip/sparx5/Kconfig [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/Makefile [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_main.c [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_main.h [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_packet.c [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_port.c [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_port.h [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c [new file with mode: 0644]
drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c [new file with mode: 0644]
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot_net.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/Makefile
drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c [new file with mode: 0644]
drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
drivers/net/ethernet/ti/am65-cpsw-switchdev.c
drivers/net/ethernet/ti/cpsw_switchdev.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/gtp.c
drivers/net/hamradio/mkiss.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/rndis_filter.c
drivers/net/ipa/Makefile
drivers/net/ipa/gsi.c
drivers/net/ipa/gsi.h
drivers/net/ipa/gsi_reg.h
drivers/net/ipa/ipa_data-v3.1.c [new file with mode: 0644]
drivers/net/ipa/ipa_data.h
drivers/net/ipa/ipa_endpoint.c
drivers/net/ipa/ipa_main.c
drivers/net/mhi/net.c
drivers/net/mhi/proto_mbim.c
drivers/net/netdevsim/dev.c
drivers/net/netdevsim/netdevsim.h
drivers/net/phy/dp83867.c
drivers/net/usb/cdc_eem.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/smsc75xx.c
drivers/net/usb/usbnet.c
drivers/net/virtio_net.c
drivers/net/vrf.c
drivers/net/wan/c101.c
drivers/net/wan/hostess_sv11.c
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath11k/mac.c
drivers/net/wireless/ath/ath11k/pci.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/wcn36xx/smd.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
drivers/net/wireless/intel/iwlwifi/Makefile
drivers/net/wireless/intel/iwlwifi/cfg/22000.c
drivers/net/wireless/intel/iwlwifi/cfg/9000.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.h
drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.h
drivers/net/wireless/intel/iwlwifi/fw/dump.c [new file with mode: 0644]
drivers/net/wireless/intel/iwlwifi/fw/file.h
drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
drivers/net/wireless/intel/iwlwifi/fw/uefi.c [new file with mode: 0644]
drivers/net/wireless/intel/iwlwifi/fw/uefi.h [new file with mode: 0644]
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/iwl-prph.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intersil/orinoco/hw.c
drivers/net/wireless/intersil/orinoco/hw.h
drivers/net/wireless/intersil/orinoco/wext.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/marvell/mwifiex/fw.h
drivers/net/wireless/marvell/mwifiex/sta_cmd.c
drivers/net/wireless/marvell/mwl8k.c
drivers/net/wireless/mediatek/mt76/dma.c
drivers/net/wireless/mediatek/mt76/mac80211.c
drivers/net/wireless/mediatek/mt76/mt76.h
drivers/net/wireless/mediatek/mt76/mt7603/init.c
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
drivers/net/wireless/mediatek/mt76/mt7603/regs.h
drivers/net/wireless/mediatek/mt76/mt7615/Makefile
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
drivers/net/wireless/mediatek/mt76/mt7615/dma.c
drivers/net/wireless/mediatek/mt76/mt7615/init.c
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
drivers/net/wireless/mediatek/mt76/mt7615/mac.h
drivers/net/wireless/mediatek/mt76/mt7615/main.c
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
drivers/net/wireless/mediatek/mt76/mt7615/regs.h
drivers/net/wireless/mediatek/mt76/mt7615/sdio.h
drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
drivers/net/wireless/mediatek/mt76/mt76_connac.h
drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
drivers/net/wireless/mediatek/mt76/mt7915/Makefile
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
drivers/net/wireless/mediatek/mt76/mt7915/dma.c
drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
drivers/net/wireless/mediatek/mt76/mt7915/init.c
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
drivers/net/wireless/mediatek/mt76/mt7915/mac.h
drivers/net/wireless/mediatek/mt76/mt7915/main.c
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
drivers/net/wireless/mediatek/mt76/mt7915/pci.c
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
drivers/net/wireless/mediatek/mt76/mt7915/testmode.h
drivers/net/wireless/mediatek/mt76/mt7921/Makefile
drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
drivers/net/wireless/mediatek/mt76/mt7921/dma.c
drivers/net/wireless/mediatek/mt76/mt7921/init.c
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
drivers/net/wireless/mediatek/mt76/mt7921/mac.h
drivers/net/wireless/mediatek/mt76/mt7921/main.c
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
drivers/net/wireless/mediatek/mt76/mt7921/pci.c
drivers/net/wireless/mediatek/mt76/sdio.c
drivers/net/wireless/mediatek/mt76/testmode.c
drivers/net/wireless/mediatek/mt76/tx.c
drivers/net/wireless/mediatek/mt76/usb.c
drivers/net/wireless/mediatek/mt7601u/usb.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
drivers/net/wireless/realtek/rtw88/coex.c
drivers/net/wireless/realtek/rtw88/debug.c
drivers/net/wireless/realtek/rtw88/debug.h
drivers/net/wireless/realtek/rtw88/fw.c
drivers/net/wireless/realtek/rtw88/fw.h
drivers/net/wireless/realtek/rtw88/mac80211.c
drivers/net/wireless/realtek/rtw88/main.c
drivers/net/wireless/realtek/rtw88/main.h
drivers/net/wireless/realtek/rtw88/pci.c
drivers/net/wireless/realtek/rtw88/phy.c
drivers/net/wireless/realtek/rtw88/phy.h
drivers/net/wireless/realtek/rtw88/ps.c
drivers/net/wireless/realtek/rtw88/rtw8822c.c
drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
drivers/net/wwan/Kconfig
drivers/net/wwan/Makefile
drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
drivers/net/wwan/iosm/iosm_ipc_wwan.c
drivers/net/wwan/rpmsg_wwan_ctrl.c [new file with mode: 0644]
drivers/net/wwan/wwan_core.c
drivers/net/wwan/wwan_hwsim.c
drivers/net/xen-netback/interface.c
drivers/nfc/nxp-nci/core.c
drivers/nfc/nxp-nci/firmware.c
drivers/nvme/host/Kconfig
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/fc.c
drivers/nvme/host/rdma.c
drivers/nvme/target/core.c
drivers/nvme/target/loop.c
drivers/nvme/target/nvmet.h
drivers/nvme/target/tcp.c
drivers/pci/controller/dwc/Makefile
drivers/pci/controller/dwc/pcie-tegra194-acpi.c [new file with mode: 0644]
drivers/pci/controller/dwc/pcie-tegra194.c
drivers/pci/controller/pci-aardvark.c
drivers/pci/of.c
drivers/pci/probe.c
drivers/pci/quirks.c
drivers/phy/broadcom/phy-brcm-usb-init.h
drivers/phy/cadence/phy-cadence-sierra.c
drivers/phy/mediatek/phy-mtk-tphy.c
drivers/phy/microchip/sparx5_serdes.c
drivers/phy/ralink/phy-mt7621-pci.c
drivers/phy/ti/phy-j721e-wiz.c
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
drivers/pinctrl/aspeed/pinctrl-aspeed.c
drivers/pinctrl/aspeed/pinmux-aspeed.c
drivers/pinctrl/qcom/Kconfig
drivers/pinctrl/qcom/pinctrl-sdx55.c
drivers/pinctrl/ralink/pinctrl-rt2880.c
drivers/platform/mellanox/mlxreg-hotplug.c
drivers/platform/surface/aggregator/controller.c
drivers/platform/surface/surface_aggregator_registry.c
drivers/platform/surface/surface_dtx.c
drivers/platform/x86/thinkpad_acpi.c
drivers/ptp/ptp_clock.c
drivers/regulator/Kconfig
drivers/regulator/atc260x-regulator.c
drivers/regulator/bd718x7-regulator.c
drivers/regulator/core.c
drivers/regulator/cros-ec-regulator.c
drivers/regulator/da9121-regulator.c
drivers/regulator/fan53555.c
drivers/regulator/fan53880.c
drivers/regulator/fixed.c
drivers/regulator/helpers.c
drivers/regulator/hi6421v600-regulator.c
drivers/regulator/hi655x-regulator.c
drivers/regulator/max77620-regulator.c
drivers/regulator/mt6315-regulator.c
drivers/regulator/rt4801-regulator.c
drivers/regulator/rtmv20-regulator.c
drivers/regulator/scmi-regulator.c
drivers/rpmsg/rpmsg_core.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_fba.c
drivers/s390/block/dasd_int.h
drivers/s390/cio/vfio_ccw_cp.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/s390/cio/vfio_ccw_fsm.c
drivers/s390/cio/vfio_ccw_ops.c
drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
drivers/scsi/aic7xxx/scsi_message.h
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
drivers/scsi/hosts.c
drivers/scsi/libsas/sas_port.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/qedf/qedf_main.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/ufs/ufs-mediatek.c
drivers/scsi/vmw_pvscsi.c
drivers/soc/amlogic/meson-clk-measure.c
drivers/soundwire/qcom.c
drivers/spi/spi-bcm2835.c
drivers/spi/spi-bitbang.c
drivers/spi/spi-fsl-spi.c
drivers/spi/spi-omap-uwire.c
drivers/spi/spi-omap2-mcspi.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-stm32-qspi.c
drivers/spi/spi-zynq-qspi.c
drivers/staging/emxx_udc/emxx_udc.c
drivers/staging/iio/cdc/ad7746.c
drivers/staging/ralink-gdma/ralink-gdma.c
drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
drivers/target/target_core_iblock.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/tee/optee/call.c
drivers/tee/optee/optee_msg.h
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
drivers/thermal/intel/therm_throt.c
drivers/thermal/intel/x86_pkg_temp_thermal.c
drivers/thermal/qcom/qcom-spmi-adc-tm5.c
drivers/thermal/ti-soc-thermal/ti-bandgap.c
drivers/thunderbolt/dma_port.c
drivers/thunderbolt/usb4.c
drivers/tty/serial/8250/8250.h
drivers/tty/serial/8250/8250_aspeed_vuart.c
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/8250/8250_exar.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/rp2.c
drivers/tty/serial/serial-tegra.c
drivers/tty/serial/serial_core.c
drivers/tty/serial/sh-sci.c
drivers/usb/cdns3/cdns3-gadget.c
drivers/usb/cdns3/cdnsp-gadget.c
drivers/usb/cdns3/cdnsp-ring.c
drivers/usb/chipidea/udc.c
drivers/usb/chipidea/usbmisc_imx.c
drivers/usb/core/devio.c
drivers/usb/core/hub.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/debug.h
drivers/usb/dwc3/debugfs.c
drivers/usb/dwc3/dwc3-meson-g12a.c
drivers/usb/dwc3/ep0.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/config.c
drivers/usb/gadget/function/f_ecm.c
drivers/usb/gadget/function/f_eem.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_hid.c
drivers/usb/gadget/function/f_loopback.c
drivers/usb/gadget/function/f_ncm.c
drivers/usb/gadget/function/f_printer.c
drivers/usb/gadget/function/f_rndis.c
drivers/usb/gadget/function/f_serial.c
drivers/usb/gadget/function/f_sourcesink.c
drivers/usb/gadget/function/f_subset.c
drivers/usb/gadget/function/f_tcm.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.h
drivers/usb/misc/brcmstb-usb-pinmap.c
drivers/usb/misc/trancevibrator.c
drivers/usb/misc/uss720.c
drivers/usb/musb/musb_core.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/omninet.c
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/serial/quatech2.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/typec/mux.c
drivers/usb/typec/mux/intel_pmc_mux.c
drivers/usb/typec/tcpm/tcpm.c
drivers/usb/typec/tcpm/wcove.c
drivers/usb/typec/ucsi/ucsi.c
drivers/vfio/pci/Kconfig
drivers/vfio/pci/vfio_pci_config.c
drivers/vfio/platform/vfio_platform_common.c
drivers/vfio/vfio_iommu_type1.c
drivers/video/fbdev/core/fb_defio.c
drivers/video/fbdev/core/fbmem.c
drivers/video/fbdev/hgafb.c
fs/afs/dir.c
fs/afs/main.c
fs/afs/write.c
fs/btrfs/block-group.c
fs/btrfs/compression.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/reflink.c
fs/btrfs/tree-log.c
fs/btrfs/zoned.c
fs/cifs/cifs_ioctl.h
fs/cifs/cifspdu.h
fs/cifs/ioctl.c
fs/cifs/smb2pdu.c
fs/cifs/trace.h
fs/coredump.c
fs/debugfs/file.c
fs/debugfs/inode.c
fs/ext4/extents.c
fs/ext4/fast_commit.c
fs/ext4/fast_commit.h
fs/ext4/ialloc.c
fs/ext4/mballoc.c
fs/ext4/namei.c
fs/ext4/super.c
fs/ext4/sysfs.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/glops.c
fs/gfs2/log.c
fs/gfs2/log.h
fs/gfs2/lops.c
fs/gfs2/lops.h
fs/gfs2/util.c
fs/hugetlbfs/inode.c
fs/io-wq.c
fs/io-wq.h
fs/io_uring.c
fs/nfs/client.c
fs/nfs/filelayout/filelayout.c
fs/nfs/namespace.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4client.c
fs/nfs/nfs4file.c
fs/nfs/nfs4proc.c
fs/nfs/nfstrace.h
fs/nfs/pagelist.c
fs/nfs/pnfs.c
fs/nfs/super.c
fs/notify/fanotify/fanotify_user.c
fs/notify/fdinfo.c
fs/ocfs2/file.c
fs/proc/base.c
fs/xfs/libxfs/xfs_ag_resv.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_inode_buf.c
fs/xfs/libxfs/xfs_trans_inode.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_message.h
include/asm-generic/vmlinux.lds.h
include/dt-bindings/usb/pd.h
include/linux/arch_topology.h
include/linux/compiler_attributes.h
include/linux/device.h
include/linux/entry-kvm.h
include/linux/ethtool.h
include/linux/fanotify.h
include/linux/fb.h
include/linux/hid.h
include/linux/host1x.h
include/linux/huge_mm.h
include/linux/hugetlb.h
include/linux/ieee80211.h
include/linux/if_arp.h
include/linux/if_bridge.h
include/linux/init.h
include/linux/kvm_host.h
include/linux/mfd/rohm-bd70528.h
include/linux/mfd/rohm-bd71828.h
include/linux/mlx4/device.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/transobj.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/mod_devicetable.h
include/linux/pci.h
include/linux/pgtable.h
include/linux/platform_data/ti-sysc.h
include/linux/ptp_clock_kernel.h
include/linux/rmap.h
include/linux/rtsx_pci.h
include/linux/sched.h
include/linux/sctp.h
include/linux/socket.h
include/linux/sunrpc/xprt.h
include/linux/swapops.h
include/linux/tick.h
include/linux/usb/pd.h
include/linux/usb/pd_ext_sdb.h
include/linux/wwan.h
include/net/cfg80211.h
include/net/dsa.h
include/net/icmp.h
include/net/mac80211.h
include/net/mptcp.h
include/net/net_namespace.h
include/net/netfilter/nf_conntrack_l4proto.h
include/net/netfilter/nf_tables_core.h
include/net/netns/sctp.h
include/net/netns/xfrm.h
include/net/sch_generic.h
include/net/sctp/command.h
include/net/sctp/constants.h
include/net/sctp/sctp.h
include/net/sctp/sm.h
include/net/sctp/structs.h
include/net/sock.h
include/net/switchdev.h
include/net/tls.h
include/net/xfrm.h
include/sound/soc-dai.h
include/trace/events/mptcp.h
include/uapi/asm-generic/socket.h
include/uapi/asm-generic/unistd.h
include/uapi/linux/ethtool_netlink.h
include/uapi/linux/in.h
include/uapi/linux/input-event-codes.h
include/uapi/linux/io_uring.h
include/uapi/linux/kvm.h
include/uapi/linux/mptcp.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/nl80211.h
include/uapi/linux/sctp.h
include/uapi/linux/seg6_local.h
include/uapi/linux/snmp.h
init/main.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup-v1.c
kernel/crash_core.c
kernel/entry/common.c
kernel/events/core.c
kernel/irq_work.c
kernel/printk/printk_safe.c
kernel/sched/core.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/pelt.h
kernel/seccomp.c
kernel/time/tick-sched.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_clock.c
lib/crc64.c
lib/percpu-refcount.c
mm/debug_vm_pgtable.c
mm/huge_memory.c
mm/hugetlb.c
mm/internal.h
mm/kasan/init.c
mm/kfence/core.c
mm/memory-failure.c
mm/memory.c
mm/migrate.c
mm/page_alloc.c
mm/page_vma_mapped.c
mm/pgtable-generic.c
mm/rmap.c
mm/slab_common.c
mm/slub.c
mm/sparse.c
mm/swapfile.c
mm/truncate.c
net/8021q/vlan.h
net/appletalk/aarp.c
net/batman-adv/bat_iv_ogm.c
net/bluetooth/smp.c
net/bridge/br_cfm.c
net/bridge/br_fdb.c
net/bridge/br_mdb.c
net/bridge/br_private.h
net/bridge/br_stp.c
net/bridge/br_vlan.c
net/bridge/br_vlan_tunnel.c
net/caif/cfcnfg.c
net/can/bcm.c
net/can/isotp.c
net/can/j1939/transport.c
net/can/raw.c
net/core/dev.c
net/core/devlink.c
net/core/neighbour.c
net/core/net_namespace.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_reuseport.c
net/dsa/dsa2.c
net/dsa/dsa_priv.h
net/dsa/port.c
net/dsa/slave.c
net/dsa/switch.c
net/ethtool/eeprom.c
net/ethtool/ioctl.c
net/ethtool/netlink.h
net/ethtool/strset.c
net/ipv4/af_inet.c
net/ipv4/ah4.c
net/ipv4/cipso_ipv4.c
net/ipv4/devinet.c
net/ipv4/esp4.c
net/ipv4/esp4_offload.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/ipcomp.c
net/ipv4/ipip.c
net/ipv4/ping.c
net/ipv4/proc.c
net/ipv4/route.c
net/ipv4/tcp_minisocks.c
net/ipv4/udp.c
net/ipv4/xfrm4_tunnel.c
net/ipv6/addrconf.c
net/ipv6/ah6.c
net/ipv6/esp6.c
net/ipv6/esp6_offload.c
net/ipv6/icmp.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ipcomp6.c
net/ipv6/mip6.c
net/ipv6/netfilter/nft_fib_ipv6.c
net/ipv6/seg6_local.c
net/ipv6/sit.c
net/ipv6/udp.c
net/ipv6/xfrm6_output.c
net/ipv6/xfrm6_tunnel.c
net/kcm/kcmsock.c
net/key/af_key.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/debugfs.c
net/mac80211/debugfs_netdev.c
net/mac80211/debugfs_sta.c
net/mac80211/driver-ops.h
net/mac80211/he.c
net/mac80211/ht.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/led.c
net/mac80211/main.c
net/mac80211/mesh.h
net/mac80211/mesh_hwmp.c
net/mac80211/mesh_pathtbl.c
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/rate.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/status.c
net/mac80211/tdls.c
net/mac80211/trace.h
net/mac80211/tx.c
net/mac80211/util.c
net/mptcp/ctrl.c
net/mptcp/mib.c
net/mptcp/mib.h
net/mptcp/mptcp_diag.c
net/mptcp/options.c
net/mptcp/pm.c
net/mptcp/pm_netlink.c
net/mptcp/protocol.c
net/mptcp/protocol.h
net/mptcp/subflow.c
net/netfilter/Makefile
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_conntrack_proto_icmp.c
net/netfilter/nf_conntrack_proto_icmpv6.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_proto_udp.c
net/netfilter/nf_synproxy_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_core.c
net/netfilter/nfnetlink_hook.c
net/netfilter/nft_exthdr.c
net/netfilter/nft_last.c [new file with mode: 0644]
net/openvswitch/Makefile
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/openvswitch_trace.c [new file with mode: 0644]
net/openvswitch/openvswitch_trace.h [new file with mode: 0644]
net/packet/af_packet.c
net/qrtr/qrtr.c
net/rds/recv.c
net/sched/act_ct.c
net/sched/cls_flower.c
net/sched/sch_cake.c
net/sched/sch_generic.c
net/sctp/associola.c
net/sctp/debug.c
net/sctp/input.c
net/sctp/ipv6.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/sm_statetable.c
net/sctp/socket.c
net/sctp/sysctl.c
net/sctp/transport.c
net/smc/smc_stats.c
net/smc/smc_tx.c
net/socket.c
net/sunrpc/clnt.c
net/sunrpc/xprt.c
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h
net/sunrpc/xprtsock.c
net/switchdev/switchdev.c
net/tipc/bcast.c
net/tipc/msg.c
net/tipc/msg.h
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/virtio_transport.c
net/vmw_vsock/virtio_transport_common.c
net/wireless/Makefile
net/wireless/chan.c
net/wireless/core.c
net/wireless/core.h
net/wireless/nl80211.c
net/wireless/pmsr.c
net/wireless/rdev-ops.h
net/wireless/reg.c
net/wireless/scan.c
net/wireless/sysfs.c
net/wireless/trace.h
net/wireless/util.c
net/wireless/wext-compat.c
net/wireless/wext-spy.c
net/xfrm/xfrm_hash.h
net/xfrm/xfrm_input.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_replay.c
net/xfrm/xfrm_state.c
samples/vfio-mdev/mdpy-fb.c
scripts/recordmcount.h
sound/core/control_led.c
sound/core/seq/seq_timer.c
sound/core/timer.c
sound/firewire/amdtp-stream.c
sound/hda/intel-dsp-config.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/raven/acp3x-pcm-dma.c
sound/soc/amd/raven/acp3x.h
sound/soc/amd/raven/pci-acp3x.c
sound/soc/codecs/ak5558.c
sound/soc/codecs/cs35l32.c
sound/soc/codecs/cs35l33.c
sound/soc/codecs/cs35l34.c
sound/soc/codecs/cs42l42.c
sound/soc/codecs/cs42l56.c
sound/soc/codecs/cs42l73.c
sound/soc/codecs/cs53l30.c
sound/soc/codecs/da7219.c
sound/soc/codecs/lpass-rx-macro.c
sound/soc/codecs/lpass-tx-macro.c
sound/soc/codecs/max98088.c
sound/soc/codecs/rt5659.c
sound/soc/codecs/rt5682-sdw.c
sound/soc/codecs/rt711-sdca.c
sound/soc/codecs/sti-sas.c
sound/soc/codecs/tas2562.h
sound/soc/fsl/Kconfig
sound/soc/fsl/fsl-asoc-card.c
sound/soc/generic/audio-graph-card.c
sound/soc/generic/simple-card.c
sound/soc/intel/boards/bytcr_rt5640.c
sound/soc/qcom/lpass-cpu.c
sound/soc/qcom/lpass.h
sound/soc/soc-core.c
sound/soc/soc-topology.c
sound/soc/sof/intel/hda-dai.c
sound/soc/sof/pm.c
sound/soc/stm/stm32_sai_sub.c
sound/usb/format.c
sound/usb/mixer_quirks.c
sound/usb/mixer_scarlett_gen2.c
sound/usb/mixer_scarlett_gen2.h
tools/arch/mips/include/uapi/asm/perf_regs.h [new file with mode: 0644]
tools/arch/x86/include/asm/disabled-features.h
tools/bootconfig/include/linux/bootconfig.h
tools/bootconfig/main.c
tools/include/uapi/linux/kvm.h
tools/lib/bpf/xsk.c
tools/objtool/arch/x86/decode.c
tools/objtool/elf.c
tools/perf/Makefile.config
tools/perf/builtin-record.c
tools/perf/check-headers.sh
tools/perf/perf.c
tools/perf/pmu-events/arch/powerpc/power10/cache.json
tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
tools/perf/pmu-events/arch/powerpc/power10/frontend.json
tools/perf/pmu-events/arch/powerpc/power10/locks.json
tools/perf/pmu-events/arch/powerpc/power10/marked.json
tools/perf/pmu-events/arch/powerpc/power10/memory.json
tools/perf/pmu-events/arch/powerpc/power10/others.json
tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
tools/perf/pmu-events/arch/powerpc/power10/pmc.json
tools/perf/pmu-events/arch/powerpc/power10/translation.json
tools/perf/pmu-events/jevents.c
tools/perf/tests/attr/base-record
tools/perf/util/bpf_counter.c
tools/perf/util/dwarf-aux.c
tools/perf/util/env.c
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/perf_api_probe.c
tools/perf/util/perf_api_probe.h
tools/perf/util/probe-finder.c
tools/perf/util/session.c
tools/perf/util/stat-display.c
tools/perf/util/symbol-elf.c
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/bpf/verifier/and.c
tools/testing/selftests/bpf/verifier/bounds.c
tools/testing/selftests/bpf/verifier/dead_code.c
tools/testing/selftests/bpf/verifier/jmp32.c
tools/testing/selftests/bpf/verifier/jset.c
tools/testing/selftests/bpf/verifier/unpriv.c
tools/testing/selftests/bpf/verifier/value_ptr_arith.c
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/demand_paging_test.c
tools/testing/selftests/kvm/hardware_disable_test.c
tools/testing/selftests/kvm/include/kvm_util.h
tools/testing/selftests/kvm/include/test_util.h
tools/testing/selftests/kvm/kvm_page_table_test.c
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/kvm_util_internal.h
tools/testing/selftests/kvm/lib/perf_test_util.c
tools/testing/selftests/kvm/lib/rbtree.c [new file with mode: 0644]
tools/testing/selftests/kvm/lib/test_util.c
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/memslot_modification_stress_test.c
tools/testing/selftests/kvm/memslot_perf_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/get_cpuid_test.c
tools/testing/selftests/kvm/x86_64/get_msr_index_features.c
tools/testing/selftests/net/.gitignore
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/config
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/icmp.sh [new file with mode: 0755]
tools/testing/selftests/net/icmp_redirect.sh
tools/testing/selftests/net/mptcp/mptcp_connect.sh
tools/testing/selftests/net/mptcp/mptcp_join.sh
tools/testing/selftests/net/mptcp/simult_flows.sh
tools/testing/selftests/net/so_netns_cookie.c [new file with mode: 0644]
tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh [new file with mode: 0755]
tools/testing/selftests/net/tls.c
tools/testing/selftests/net/udpgro_fwd.sh
tools/testing/selftests/net/veth.sh
tools/testing/selftests/netfilter/Makefile
tools/testing/selftests/netfilter/nft_fib.sh [new file with mode: 0755]
tools/testing/selftests/proc/.gitignore
tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py
tools/testing/selftests/tc-testing/tc-tests/actions/ct.json
virt/kvm/kvm_main.c
virt/lib/irqbypass.c

index c24b147..15d4eaa 100644
--- a/.clang-format
+++ b/.clang-format
@@ -109,8 +109,8 @@ ForEachMacros:
   - 'css_for_each_child'
   - 'css_for_each_descendant_post'
   - 'css_for_each_descendant_pre'
-  - 'cxl_for_each_cmd'
   - 'device_for_each_child_node'
+  - 'displayid_iter_for_each'
   - 'dma_fence_chain_for_each'
   - 'do_for_each_ftrace_op'
   - 'drm_atomic_crtc_for_each_plane'
@@ -136,6 +136,7 @@ ForEachMacros:
   - 'drm_mm_for_each_node_in_range'
   - 'drm_mm_for_each_node_safe'
   - 'flow_action_for_each'
+  - 'for_each_acpi_dev_match'
   - 'for_each_active_dev_scope'
   - 'for_each_active_drhd_unit'
   - 'for_each_active_iommu'
@@ -171,7 +172,6 @@ ForEachMacros:
   - 'for_each_dapm_widgets'
   - 'for_each_dev_addr'
   - 'for_each_dev_scope'
-  - 'for_each_displayid_db'
   - 'for_each_dma_cap_mask'
   - 'for_each_dpcm_be'
   - 'for_each_dpcm_be_rollback'
@@ -179,6 +179,7 @@ ForEachMacros:
   - 'for_each_dpcm_fe'
   - 'for_each_drhd_unit'
   - 'for_each_dss_dev'
+  - 'for_each_dtpm_table'
   - 'for_each_efi_memory_desc'
   - 'for_each_efi_memory_desc_in_map'
   - 'for_each_element'
@@ -215,6 +216,7 @@ ForEachMacros:
   - 'for_each_migratetype_order'
   - 'for_each_msi_entry'
   - 'for_each_msi_entry_safe'
+  - 'for_each_msi_vector'
   - 'for_each_net'
   - 'for_each_net_continue_reverse'
   - 'for_each_netdev'
@@ -270,6 +272,12 @@ ForEachMacros:
   - 'for_each_prime_number_from'
   - 'for_each_process'
   - 'for_each_process_thread'
+  - 'for_each_prop_codec_conf'
+  - 'for_each_prop_dai_codec'
+  - 'for_each_prop_dai_cpu'
+  - 'for_each_prop_dlc_codecs'
+  - 'for_each_prop_dlc_cpus'
+  - 'for_each_prop_dlc_platforms'
   - 'for_each_property_of_node'
   - 'for_each_registered_fb'
   - 'for_each_requested_gpio'
@@ -430,6 +438,7 @@ ForEachMacros:
   - 'queue_for_each_hw_ctx'
   - 'radix_tree_for_each_slot'
   - 'radix_tree_for_each_tagged'
+  - 'rb_for_each'
   - 'rbtree_postorder_for_each_entry_safe'
   - 'rdma_for_each_block'
   - 'rdma_for_each_port'
index ce6c497..c79a787 100644
--- a/.mailmap
+++ b/.mailmap
@@ -243,6 +243,9 @@ Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
 Mayuresh Janorkar <mayur@ti.com>
 Michael Buesch <m@bues.ch>
 Michel Dänzer <michel@tungstengraphics.com>
+Michel Lespinasse <michel@lespinasse.org>
+Michel Lespinasse <michel@lespinasse.org> <walken@google.com>
+Michel Lespinasse <michel@lespinasse.org> <walken@zoy.org>
 Miguel Ojeda <ojeda@kernel.org> <miguel.ojeda.sandonis@gmail.com>
 Mike Rapoport <rppt@kernel.org> <mike@compulab.co.il>
 Mike Rapoport <rppt@kernel.org> <mike.rapoport@gmail.com>
index 32509b9..92b49bc 100644
--- a/Documentation/devicetree/bindings/connector/usb-connector.yaml
+++ b/Documentation/devicetree/bindings/connector/usb-connector.yaml
@@ -149,6 +149,17 @@ properties:
     maxItems: 6
     $ref: /schemas/types.yaml#/definitions/uint32-array
 
+  sink-vdos-v1:
+    description: An array of u32 with each entry, a Vendor Defined Message Object (VDO),
+      providing additional information corresponding to the product, the detailed bit
+      definitions and the order of each VDO can be found in
+      "USB Power Delivery Specification Revision 2.0, Version 1.3" chapter 6.4.4.3.1 Discover
+      Identity. User can specify the VDO array via VDO_IDH/_CERT/_PRODUCT/_CABLE/_AMA defined in
+      dt-bindings/usb/pd.h.
+    minItems: 3
+    maxItems: 6
+    $ref: /schemas/types.yaml#/definitions/uint32-array
+
   op-sink-microwatt:
     description: Sink required operating power in microwatt, if source can't
       offer the power, Capability Mismatch is set. Required for power sink and
@@ -207,6 +218,10 @@ properties:
       SNK_READY for non-pd link.
     type: boolean
 
+dependencies:
+  sink-vdos-v1: [ 'sink-vdos' ]
+  sink-vdos: [ 'sink-vdos-v1' ]
+
 required:
   - compatible
 
index 33ee575..926be9a 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml
@@ -49,7 +49,7 @@ examples:
         #size-cells = <0>;
 
         adc@48 {
-            comatible = "ti,ads7828";
+            compatible = "ti,ads7828";
             reg = <0x48>;
             vref-supply = <&vref>;
             ti,differential-input;
index 7b553d5..98c6fcf 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mpc.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-mpc.yaml
@@ -46,6 +46,13 @@ properties:
     description: |
       I2C bus timeout in microseconds
 
+  fsl,i2c-erratum-a004447:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description: |
+      Indicates the presence of QorIQ erratum A-004447, which
+      says that the standard i2c recovery scheme mechanism does
+      not work and an alternate implementation is needed.
+
 required:
   - compatible
   - reg
index ce505a7..9cd56ff 100644
--- a/Documentation/devicetree/bindings/media/renesas,drif.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,drif.yaml
@@ -67,9 +67,7 @@ properties:
     maxItems: 1
 
   clock-names:
-    maxItems: 1
-    items:
-      - const: fck
+    const: fck
 
   resets:
     maxItems: 1
index 5e93d4f..d08a881 100644
--- a/Documentation/devicetree/bindings/net/ingenic,mac.yaml
+++ b/Documentation/devicetree/bindings/net/ingenic,mac.yaml
@@ -61,7 +61,7 @@ examples:
     #include <dt-bindings/clock/x1000-cgu.h>
 
     mac: ethernet@134b0000 {
-        compatible = "ingenic,x1000-mac", "snps,dwmac";
+        compatible = "ingenic,x1000-mac";
         reg = <0x134b0000 0x2000>;
 
         interrupt-parent = <&intc>;
diff --git a/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml b/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml
new file mode 100644
index 0000000..347b912
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml
@@ -0,0 +1,226 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/microchip,sparx5-switch.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip Sparx5 Ethernet switch controller
+
+maintainers:
+  - Steen Hegelund <steen.hegelund@microchip.com>
+  - Lars Povlsen <lars.povlsen@microchip.com>
+
+description: |
+  The SparX-5 Enterprise Ethernet switch family provides a rich set of
+  Enterprise switching features such as advanced TCAM-based VLAN and
+  QoS processing enabling delivery of differentiated services, and
+  security through TCAM-based frame processing using versatile content
+  aware processor (VCAP).
+
+  IPv4/IPv6 Layer 3 (L3) unicast and multicast routing is supported
+  with up to 18K IPv4/9K IPv6 unicast LPM entries and up to 9K IPv4/3K
+  IPv6 (S,G) multicast groups.
+
+  L3 security features include source guard and reverse path
+  forwarding (uRPF) tasks. Additional L3 features include VRF-Lite and
+  IP tunnels (IP over GRE/IP).
+
+  The SparX-5 switch family targets managed Layer 2 and Layer 3
+  equipment in SMB, SME, and Enterprise where high port count
+  1G/2.5G/5G/10G switching with 10G/25G aggregation links is required.
+
+properties:
+  $nodename:
+    pattern: "^switch@[0-9a-f]+$"
+
+  compatible:
+    const: microchip,sparx5-switch
+
+  reg:
+    items:
+      - description: cpu target
+      - description: devices target
+      - description: general control block target
+
+  reg-names:
+    items:
+      - const: cpu
+      - const: devices
+      - const: gcb
+
+  interrupts:
+    minItems: 1
+    items:
+      - description: register based extraction
+      - description: frame dma based extraction
+
+  interrupt-names:
+    minItems: 1
+    items:
+      - const: xtr
+      - const: fdma
+
+  resets:
+    items:
+      - description: Reset controller used for switch core reset (soft reset)
+
+  reset-names:
+    items:
+      - const: switch
+
+  mac-address: true
+
+  ethernet-ports:
+    type: object
+    patternProperties:
+      "^port@[0-9a-f]+$":
+        type: object
+
+        properties:
+          '#address-cells':
+            const: 1
+          '#size-cells':
+            const: 0
+
+          reg:
+            description: Switch port number
+
+          phys:
+            maxItems: 1
+            description:
+              phandle of an Ethernet SerDes PHY.  This defines which SerDes
+              instance will handle the Ethernet traffic.
+
+          phy-mode:
+            description:
+              This specifies the interface used by the Ethernet SerDes towards
+              the PHY or SFP.
+
+          microchip,bandwidth:
+            description: Specifies bandwidth in Mbit/s allocated to the port.
+            $ref: "/schemas/types.yaml#/definitions/uint32"
+            maximum: 25000
+
+          phy-handle:
+            description:
+              phandle of an Ethernet PHY.  This is optional and if provided it
+              points to the cuPHY used by the Ethernet SerDes.
+
+          sfp:
+            description:
+              phandle of an SFP.  This is optional and used when not specifying
+              a cuPHY.  It points to the SFP node that describes the SFP used by
+              the Ethernet SerDes.
+
+          managed: true
+
+          microchip,sd-sgpio:
+            description:
+              Index of the port's Signal Detect SGPIO in the set of 384 SGPIOs.
+              This is optional, and only needed if the default index is not
+              correct.
+            $ref: "/schemas/types.yaml#/definitions/uint32"
+            minimum: 0
+            maximum: 383
+
+        required:
+          - reg
+          - phys
+          - phy-mode
+          - microchip,bandwidth
+
+        oneOf:
+          - required:
+              - phy-handle
+          - required:
+              - sfp
+              - managed
+
+required:
+  - compatible
+  - reg
+  - reg-names
+  - interrupts
+  - interrupt-names
+  - resets
+  - reset-names
+  - ethernet-ports
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    switch: switch@600000000 {
+      compatible = "microchip,sparx5-switch";
+      reg =  <0 0x401000>,
+             <0x10004000 0x7fc000>,
+             <0x11010000 0xaf0000>;
+      reg-names = "cpu", "devices", "gcb";
+      interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+      interrupt-names = "xtr";
+      resets = <&reset 0>;
+      reset-names = "switch";
+      ethernet-ports {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        port0: port@0 {
+          reg = <0>;
+          microchip,bandwidth = <1000>;
+          phys = <&serdes 13>;
+          phy-handle = <&phy0>;
+          phy-mode = "qsgmii";
+        };
+        /* ... */
+        /* Then the 25G interfaces */
+        port60: port@60 {
+          reg = <60>;
+          microchip,bandwidth = <25000>;
+          phys = <&serdes 29>;
+          phy-mode = "10gbase-r";
+          sfp = <&sfp_eth60>;
+          managed = "in-band-status";
+          microchip,sd-sgpio = <365>;
+        };
+        port61: port@61 {
+          reg = <61>;
+          microchip,bandwidth = <25000>;
+          phys = <&serdes 30>;
+          phy-mode = "10gbase-r";
+          sfp = <&sfp_eth61>;
+          managed = "in-band-status";
+          microchip,sd-sgpio = <369>;
+        };
+        port62: port@62 {
+          reg = <62>;
+          microchip,bandwidth = <25000>;
+          phys = <&serdes 31>;
+          phy-mode = "10gbase-r";
+          sfp = <&sfp_eth62>;
+          managed = "in-band-status";
+          microchip,sd-sgpio = <373>;
+        };
+        port63: port@63 {
+          reg = <63>;
+          microchip,bandwidth = <25000>;
+          phys = <&serdes 32>;
+          phy-mode = "10gbase-r";
+          sfp = <&sfp_eth63>;
+          managed = "in-band-status";
+          microchip,sd-sgpio = <377>;
+        };
+        /* Finally the Management interface */
+        port64: port@64 {
+          reg = <64>;
+          microchip,bandwidth = <1000>;
+          phys = <&serdes 0>;
+          phy-handle = <&phy64>;
+          phy-mode = "sgmii";
+          mac-address = [ 00 00 00 01 02 03 ];
+        };
+      };
+    };
+
+...
+#  vim: set ts=2 sw=2 sts=2 tw=80 et cc=80 ft=yaml :
index 5fe6d3d..ed88ba4 100644 (file)
@@ -44,6 +44,7 @@ description:
 properties:
   compatible:
     enum:
+      - qcom,msm8998-ipa
       - qcom,sc7180-ipa
       - qcom,sc7280-ipa
       - qcom,sdm845-ipa
index 9c0ce92..56f2235 100644 (file)
@@ -51,11 +51,15 @@ properties:
         - allwinner,sun8i-r40-emac
         - allwinner,sun8i-v3s-emac
         - allwinner,sun50i-a64-emac
+        - loongson,ls2k-dwmac
+        - loongson,ls7a-dwmac
         - amlogic,meson6-dwmac
         - amlogic,meson8b-dwmac
         - amlogic,meson8m2-dwmac
         - amlogic,meson-gxbb-dwmac
         - amlogic,meson-axg-dwmac
+        - loongson,ls2k-dwmac
+        - loongson,ls7a-dwmac
         - ingenic,jz4775-mac
         - ingenic,x1000-mac
         - ingenic,x1600-mac
@@ -363,6 +367,8 @@ allOf:
               - allwinner,sun8i-r40-emac
               - allwinner,sun8i-v3s-emac
               - allwinner,sun50i-a64-emac
+              - loongson,ls2k-dwmac
+              - loongson,ls7a-dwmac
               - ingenic,jz4775-mac
               - ingenic,x1000-mac
               - ingenic,x1600-mac
index db61f07..2e35aea 100644 (file)
@@ -57,7 +57,7 @@ patternProperties:
           rate
 
       sound-dai:
-        $ref: /schemas/types.yaml#/definitions/phandle
+        $ref: /schemas/types.yaml#/definitions/phandle-array
         description: phandle of the CPU DAI
 
     patternProperties:
@@ -71,7 +71,7 @@ patternProperties:
 
         properties:
           sound-dai:
-            $ref: /schemas/types.yaml#/definitions/phandle
+            $ref: /schemas/types.yaml#/definitions/phandle-array
             description: phandle of the codec DAI
 
         required:
index 820e867..2c94ff2 100644 (file)
@@ -123,6 +123,8 @@ are in ``drivers/usb/common/common.c``.
 In addition, some functions useful for creating debugging output are
 defined in ``drivers/usb/common/debug.c``.
 
+.. _usb_header:
+
 Host-Side Data Types and Macros
 ===============================
 
index 7d01ae8..680ad17 100644 (file)
@@ -27,7 +27,8 @@ network interfaces that have PHYs connected to MAC via MDIO bus.
 During the MDIO bus driver initialization, PHYs on this bus are probed
 using the _ADR object as shown below and are registered on the MDIO bus.
 
-::
+.. code-block:: none
+
       Scope(\_SB.MDI0)
       {
         Device(PHY1) {
@@ -49,6 +50,21 @@ phy-mode
 The "phy-mode" _DSD property is used to describe the connection to
 the PHY. The valid values for "phy-mode" are defined in [4].
 
+managed
+-------
+Optional property, which specifies the PHY management type.
+The valid values for "managed" are defined in [4].
+
+fixed-link
+----------
+The "fixed-link" is described by a data-only subnode of the
+MAC port, which is linked in the _DSD package via
+hierarchical data extension (UUID dbb8e3e6-5886-4ba6-8795-1319f52a966b
+in accordance with [5] "_DSD Implementation Guide" document).
+The subnode should comprise a required property ("speed") and
+possibly the optional ones - complete list of parameters and
+their values are specified in [4].
+
 The following ASL example illustrates the usage of these properties.
 
 DSDT entry for MDIO node
@@ -60,7 +76,9 @@ component (PHYs on the MDIO bus).
 a) Silicon Component
 This node describes the MDIO controller, MDI0
 ---------------------------------------------
-::
+
+.. code-block:: none
+
        Scope(_SB)
        {
          Device(MDI0) {
@@ -80,7 +98,9 @@ This node describes the MDIO controller, MDI0
 b) Platform Component
 The PHY1 and PHY2 nodes represent the PHYs connected to MDIO bus MDI0
 ---------------------------------------------------------------------
-::
+
+.. code-block:: none
+
        Scope(\_SB.MDI0)
        {
          Device(PHY1) {
@@ -98,7 +118,9 @@ DSDT entries representing MAC nodes
 Below are the MAC nodes where PHY nodes are referenced.
 phy-mode and phy-handle are used as explained earlier.
 ------------------------------------------------------
-::
+
+.. code-block:: none
+
        Scope(\_SB.MCE0.PR17)
        {
          Name (_DSD, Package () {
@@ -121,6 +143,48 @@ phy-mode and phy-handle are used as explained earlier.
          })
        }
 
+MAC node example where "managed" property is specified.
+-------------------------------------------------------
+
+.. code-block:: none
+
+       Scope(\_SB.PP21.ETH0)
+       {
+         Name (_DSD, Package () {
+            ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+                Package () {
+                    Package () {"phy-mode", "sgmii"},
+                    Package () {"managed", "in-band-status"}
+                }
+          })
+       }
+
+MAC node example with a "fixed-link" subnode.
+---------------------------------------------
+
+.. code-block:: none
+
+       Scope(\_SB.PP21.ETH1)
+       {
+         Name (_DSD, Package () {
+           ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+                Package () {
+                    Package () {"phy-mode", "sgmii"},
+                },
+           ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+                Package () {
+                    Package () {"fixed-link", "LNK0"}
+                }
+         })
+         Name (LNK0, Package(){ // Data-only subnode of port
+           ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+                Package () {
+                    Package () {"speed", 1000},
+                    Package () {"full-duplex", 1}
+                }
+         })
+       }
+
 References
 ==========
 
@@ -131,3 +195,5 @@ References
 [3] Documentation/firmware-guide/acpi/DSD-properties-rules.rst
 
 [4] Documentation/devicetree/bindings/net/ethernet-controller.yaml
+
+[5] https://github.com/UEFI/DSD-Guide/blob/main/dsd-guide.pdf
index f72b5f1..a99ee40 100644 (file)
@@ -11,6 +11,7 @@ ACPI Support
    dsd/graph
    dsd/data-node-references
    dsd/leds
+   dsd/phy
    enumeration
    osi
    method-customizing
index 793693c..6d73ee7 100644 (file)
@@ -47,13 +47,24 @@ The driver interacts with the device in the following ways:
  - Transmit and Receive Queues
     - See description below
 
+Descriptor Formats
+------------------
+GVE supports two descriptor formats: GQI and DQO. These two formats have
+entirely different descriptors, which will be described below.
+
 Registers
 ---------
-All registers are MMIO and big endian.
+All registers are MMIO.
 
 The registers are used for initializing and configuring the device as well as
 querying device status in response to management interrupts.
 
+Endianness
+----------
+- Admin Queue messages and registers are all Big Endian.
+- GQI descriptors and datapath registers are Big Endian.
+- DQO descriptors and datapath registers are Little Endian.
+
 Admin Queue (AQ)
 ----------------
 The Admin Queue is a PAGE_SIZE memory block, treated as an array of AQ
@@ -97,10 +108,10 @@ the queues associated with that interrupt.
 The handler for these irqs schedules the napi for that block to run
 and poll the queues.
 
-Traffic Queues
---------------
-gVNIC's queues are composed of a descriptor ring and a buffer and are
-assigned to a notification block.
+GQI Traffic Queues
+------------------
+GQI queues are composed of a descriptor ring and a buffer and are assigned to a
+notification block.
 
 The descriptor rings are power-of-two-sized ring buffers consisting of
 fixed-size descriptors. They advance their head pointer using a __be32
@@ -121,3 +132,35 @@ Receive
 The buffers for receive rings are put into a data ring that is the same
 length as the descriptor ring and the head and tail pointers advance over
 the rings together.
+
+DQO Traffic Queues
+------------------
+- Every TX and RX queue is assigned a notification block.
+
+- TX and RX buffers queues, which send descriptors to the device, use MMIO
+  doorbells to notify the device of new descriptors.
+
+- RX and TX completion queues, which receive descriptors from the device, use a
+  "generation bit" to know when a descriptor was populated by the device. The
+  driver initializes all bits with the "current generation". The device will
+  populate received descriptors with the "next generation" which is inverted
+  from the current generation. When the ring wraps, the current/next generation
+  are swapped.
+
+- It's the driver's responsibility to ensure that the RX and TX completion
+  queues are not overrun. This can be accomplished by limiting the number of
+  descriptors posted to HW.
+
+- TX packets have a 16-bit completion_tag and RX buffers have a 16-bit
+  buffer_id. These will be returned on the TX completion and RX queues
+  respectively to let the driver know which packet/buffer was completed.
+
+Transmit
+~~~~~~~~
+A packet's buffers are DMA mapped for the device to access before transmission.
+After the packet has been successfully transmitted, the buffers are unmapped.
+
+Receive
+~~~~~~~
+The driver posts fixed-size buffers to HW on the RX buffer queue. The packet
+received on the associated RX queue may span multiple descriptors.
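
To make the DQO completion-queue convention above concrete, here is a minimal
C sketch of generation-bit polling. All structure names, field layouts and the
generation-bit position are illustrative assumptions, not the gve driver's
actual definitions (see drivers/net/ethernet/google/gve/ for the real code):

    /* Hypothetical DQO-style completion queue. The device writes new
     * descriptors with the generation inverted from the driver's
     * "current generation"; on ring wrap, the driver swaps generations.
     */
    #include <stdbool.h>
    #include <stdint.h>

    struct dqo_cq_desc {
            uint16_t completion_tag;   /* 16-bit tag / buffer_id */
            uint16_t flags;            /* bit 0 assumed to hold the gen bit */
    };

    struct dqo_cq {
            struct dqo_cq_desc *ring;
            unsigned int size;         /* number of descriptors in ring */
            unsigned int head;         /* next descriptor to inspect */
            bool cur_gen;              /* driver's current generation */
    };

    /* Returns true and reports the tag if the device has populated the
     * next descriptor, i.e. its gen bit differs from cur_gen. A real
     * driver must also insert a read barrier between checking the gen
     * bit and reading the rest of the descriptor.
     */
    static bool dqo_cq_poll(struct dqo_cq *cq, uint16_t *tag)
    {
            struct dqo_cq_desc *d = &cq->ring[cq->head];

            if ((bool)(d->flags & 1) == cq->cur_gen)
                    return false;              /* not written by device yet */

            *tag = d->completion_tag;
            if (++cq->head == cq->size) {      /* wrapped: swap generations */
                    cq->head = 0;
                    cq->cur_gen = !cq->cur_gen;
            }
            return true;
    }
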
index 7395a33..da4057b 100644 (file)
@@ -5,7 +5,7 @@ NXP SJA1105 switch driver
 Overview
 ========
 
-The NXP SJA1105 is a family of 6 devices:
+The NXP SJA1105 is a family of 10 SPI-managed automotive switches:
 
 - SJA1105E: First generation, no TTEthernet
 - SJA1105T: First generation, TTEthernet
@@ -13,9 +13,11 @@ The NXP SJA1105 is a family of 6 devices:
 - SJA1105Q: Second generation, TTEthernet, no SGMII
 - SJA1105R: Second generation, no TTEthernet, SGMII
 - SJA1105S: Second generation, TTEthernet, SGMII
-
-These are SPI-managed automotive switches, with all ports being gigabit
-capable, and supporting MII/RMII/RGMII and optionally SGMII on one port.
+- SJA1110A: Third generation, TTEthernet, SGMII, integrated 100base-T1 and
+  100base-TX PHYs
+- SJA1110B: Third generation, TTEthernet, SGMII, 100base-T1, 100base-TX
+- SJA1110C: Third generation, TTEthernet, SGMII, 100base-T1, 100base-TX
+- SJA1110D: Third generation, TTEthernet, SGMII, 100base-T1
 
 Being automotive parts, their configuration interface is geared towards
 set-and-forget use, with minimal dynamic interaction at runtime. They
@@ -579,3 +581,54 @@ A board would need to hook up the PHYs connected to the switch to any other
 MDIO bus available to Linux within the system (e.g. to the DSA master's MDIO
 bus). Link state management then works by the driver manually keeping in sync
 (over SPI commands) the MAC link speed with the settings negotiated by the PHY.
+
+By comparison, the SJA1110 supports an MDIO slave access point over which its
+internal 100base-T1 PHYs can be accessed from the host. This is, however, not
+used by the driver; instead, the internal 100base-T1 and 100base-TX PHYs are
+accessed through SPI commands, modeled in Linux as virtual MDIO buses.
+
+The microcontroller attached to the SJA1110 port 0 also has an MDIO controller
+operating in master mode; however, the driver does not support this either,
+since the microcontroller is disabled when the Linux driver operates.
+Discrete PHYs connected to the switch ports should have their MDIO interface
+attached to an MDIO controller from the host system and not to the switch,
+similar to SJA1105.
+
+Port compatibility matrix
+-------------------------
+
+The SJA1105 port compatibility matrix is:
+
+===== ============== ============== ==============
+Port   SJA1105E/T     SJA1105P/Q     SJA1105R/S
+===== ============== ============== ==============
+0      xMII           xMII           xMII
+1      xMII           xMII           xMII
+2      xMII           xMII           xMII
+3      xMII           xMII           xMII
+4      xMII           xMII           SGMII
+===== ============== ============== ==============
+
+
+The SJA1110 port compatibility matrix is:
+
+===== ============== ============== ============== ==============
+Port   SJA1110A       SJA1110B       SJA1110C       SJA1110D
+===== ============== ============== ============== ==============
+0      RevMII (uC)    RevMII (uC)    RevMII (uC)    RevMII (uC)
+1      100base-TX     100base-TX     100base-TX
+       or SGMII                                     SGMII
+2      xMII           xMII           xMII           xMII
+       or SGMII                                     or SGMII
+3      xMII           xMII           xMII
+       or SGMII       or SGMII                      SGMII
+       or 2500base-X  or 2500base-X                 or 2500base-X
+4      SGMII          SGMII          SGMII          SGMII
+       or 2500base-X  or 2500base-X  or 2500base-X  or 2500base-X
+5      100base-T1     100base-T1     100base-T1     100base-T1
+6      100base-T1     100base-T1     100base-T1     100base-T1
+7      100base-T1     100base-T1     100base-T1     100base-T1
+8      100base-T1     100base-T1     n/a            n/a
+9      100base-T1     100base-T1     n/a            n/a
+10     100base-T1     n/a            n/a            n/a
+===== ============== ============== ============== ==============
index 25131df..6ea91e4 100644 (file)
@@ -1363,8 +1363,8 @@ in an implementation specific way.
 ``ETHTOOL_A_FEC_AUTO`` requests the driver to choose FEC mode based on SFP
 module parameters. This does not mean autonegotiation.
 
-MODULE_EEPROM
-=============
+MODULE_EEPROM_GET
+=================
 
 Fetch module EEPROM data dump.
 This interface is designed to allow dumps of at most 1/2 page at once. This
@@ -1383,12 +1383,14 @@ Request contents:
   ``ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS``  u8      page I2C address
   =======================================  ======  ==========================
 
+If ``ETHTOOL_A_MODULE_EEPROM_BANK`` is not specified, bank 0 is assumed.
+
 Kernel response contents:
 
  +---------------------------------------------+--------+---------------------+
  | ``ETHTOOL_A_MODULE_EEPROM_HEADER``          | nested | reply header        |
  +---------------------------------------------+--------+---------------------+
- | ``ETHTOOL_A_MODULE_EEPROM_DATA``            | nested | array of bytes from |
+ | ``ETHTOOL_A_MODULE_EEPROM_DATA``            | binary | array of bytes from |
  |                                             |        | module EEPROM       |
  +---------------------------------------------+--------+---------------------+
 
index b0436d3..b3fa522 100644 (file)
@@ -2834,6 +2834,18 @@ encap_port - INTEGER
 
        Default: 0
 
+plpmtud_probe_interval - INTEGER
+        The time interval (in milliseconds) for the PLPMTUD probe timer,
+        which expires after this period if no acknowledgment to a probe
+        packet has been received by then. This is also the time interval
+        between probes for the current pmtu once the probe search
+        is done.
+
+        Setting this to 0 disables PLPMTUD; any other value must be
+        >= 5000.
+
+        Default: 0
+
 
 ``/proc/sys/net/core/*``
 ========================
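
As a usage sketch for the new knob above: the interval can be set by writing
the sysctl file. The exact path is an assumption here (the entry sits among
the SCTP sysctls, suggesting /proc/sys/net/sctp/), so treat it as illustrative:

    /* Hypothetical example: enable SCTP PLPMTUD with the minimum allowed
     * 5000 ms probe interval; writing 0 would disable it again. The path
     * below is an assumption based on the surrounding SCTP section.
     */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/net/sctp/plpmtud_probe_interval", "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            fprintf(f, "5000\n");
            return fclose(f) ? 1 : 0;
    }
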
index 3b352e5..76d939e 100644 (file)
@@ -24,3 +24,24 @@ add_addr_timeout - INTEGER (seconds)
        sysctl.
 
        Default: 120
+
+checksum_enabled - BOOLEAN
+       Control whether DSS checksum can be enabled.
+
+       DSS checksum is enabled if the value is nonzero. This is a
+       per-namespace sysctl.
+
+       Default: 0
+
+allow_join_initial_addr_port - BOOLEAN
+       Allow peers to send join requests to the IP address and port number used
+       by the initial subflow if the value is 1. This controls a flag that is
+       sent to the peer at connection time, and whether such join requests are
+       accepted or denied.
+
+       Joins to addresses advertised with ADD_ADDR are not affected by this
+       value.
+
+       This is a per-namespace sysctl.
+
+       Default: 1
index 11a9b76..0467b30 100644 (file)
@@ -177,3 +177,27 @@ nf_conntrack_gre_timeout_stream - INTEGER (seconds)
 
        This extended timeout will be used in case there is a GRE stream
        detected.
+
+nf_flowtable_tcp_timeout - INTEGER (seconds)
+        default 30
+
+        Control offload timeout for TCP connections.
+        TCP connections may be offloaded from nf conntrack to nf flow table.
+        Once aged, the connection is returned to nf conntrack with the TCP pickup timeout.
+
+nf_flowtable_tcp_pickup - INTEGER (seconds)
+        default 120
+
+        TCP connection timeout after being aged from nf flow table offload.
+
+nf_flowtable_udp_timeout - INTEGER (seconds)
+        default 30
+
+        Control offload timeout for UDP connections.
+        UDP connections may be offloaded from nf conntrack to nf flow table.
+        Once aged, the connection is returned to nf conntrack with the UDP pickup timeout.
+
+nf_flowtable_udp_pickup - INTEGER (seconds)
+        default 30
+
+        UDP connection timeout after being aged from nf flow table offload.
index bd91652..6efb41c 100644 (file)
@@ -250,14 +250,14 @@ Users can read via ``ioctl(SECCOMP_IOCTL_NOTIF_RECV)``  (or ``poll()``) on a
 seccomp notification fd to receive a ``struct seccomp_notif``, which contains
 five members: the input length of the structure, a unique-per-filter ``id``,
 the ``pid`` of the task which triggered this request (which may be 0 if the
-task is in a pid ns not visible from the listener's pid namespace), a ``flags``
-member which for now only has ``SECCOMP_NOTIF_FLAG_SIGNALED``, representing
-whether or not the notification is a result of a non-fatal signal, and the
-``data`` passed to seccomp. Userspace can then make a decision based on this
-information about what to do, and ``ioctl(SECCOMP_IOCTL_NOTIF_SEND)`` a
-response, indicating what should be returned to userspace. The ``id`` member of
-``struct seccomp_notif_resp`` should be the same ``id`` as in ``struct
-seccomp_notif``.
+task is in a pid ns not visible from the listener's pid namespace). The
+notification also contains the ``data`` passed to seccomp, and a ``flags``
+member. The structure should be zeroed out prior to calling the ioctl.
+
+Userspace can then make a decision based on this information about what to do,
+and ``ioctl(SECCOMP_IOCTL_NOTIF_SEND)`` a response, indicating what should be
+returned to userspace. The ``id`` member of ``struct seccomp_notif_resp`` should
+be the same ``id`` as in ``struct seccomp_notif``.
 
 It is worth noting that ``struct seccomp_data`` contains the values of register
 arguments to the syscall, but does not contain pointers to memory. The task's
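
A minimal user-space sketch of the receive/respond cycle described above,
using the UAPI names from the text (struct seccomp_notif, struct
seccomp_notif_resp, and the RECV/SEND ioctls). The policy decision itself is
elided; this sketch simply denies the syscall with EPERM:

    #include <errno.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/seccomp.h>

    /* fd is a seccomp notification fd (SECCOMP_FILTER_FLAG_NEW_LISTENER). */
    static int handle_one_notification(int fd)
    {
            struct seccomp_notif req;
            struct seccomp_notif_resp resp;

            /* Zero the structure before SECCOMP_IOCTL_NOTIF_RECV, as the
             * documentation above requires. */
            memset(&req, 0, sizeof(req));
            if (ioctl(fd, SECCOMP_IOCTL_NOTIF_RECV, &req) < 0)
                    return -1;

            /* A real supervisor would inspect req.data (syscall number,
             * arguments) here; this sketch denies unconditionally. */
            memset(&resp, 0, sizeof(resp));
            resp.id = req.id;          /* must echo the request's id */
            resp.error = -EPERM;

            return ioctl(fd, SECCOMP_IOCTL_NOTIF_SEND, &resp);
    }
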
index 5bfe28b..20d85da 100644 (file)
@@ -171,8 +171,8 @@ Shadow pages contain the following information:
     shadow pages) so role.quadrant takes values in the range 0..3.  Each
     quadrant maps 1GB virtual address space.
   role.access:
-    Inherited guest access permissions in the form uwx.  Note execute
-    permission is positive, not negative.
+    Inherited guest access permissions from the parent ptes in the form uwx.
+    Note execute permission is positive, not negative.
   role.invalid:
     The page is invalid and should not be used.  It is a root page that is
     currently pinned (by a cpu hardware register pointing to it); once it is
index 5feb370..af1b374 100644 (file)
@@ -118,10 +118,12 @@ KVM_REQ_MMU_RELOAD
   necessary to inform each VCPU to completely refresh the tables.  This
   request is used for that.
 
-KVM_REQ_PENDING_TIMER
+KVM_REQ_UNBLOCK
 
-  This request may be made from a timer handler run on the host on behalf
-  of a VCPU.  It informs the VCPU thread to inject a timer interrupt.
+  This request informs the vCPU to exit kvm_vcpu_block.  It is used, for
+  example, by timer handlers that run on the host on behalf of a vCPU,
+  or in order to update the interrupt routing and ensure that assigned
+  devices will wake up the vCPU.
 
 KVM_REQ_UNHALT
 
index 03f294a..d302855 100644 (file)
@@ -181,7 +181,7 @@ SLUB Debug output
 Here is a sample of slub debug output::
 
  ====================================================================
- BUG kmalloc-8: Redzone overwritten
+ BUG kmalloc-8: Right Redzone overwritten
  --------------------------------------------------------------------
 
  INFO: 0xc90f6d28-0xc90f6d2b. First byte 0x00 instead of 0xcc
@@ -189,10 +189,10 @@ Here is a sample of slub debug output::
  INFO: Object 0xc90f6d20 @offset=3360 fp=0xc90f6d58
  INFO: Allocated in get_modalias+0x61/0xf5 age=53 cpu=1 pid=554
 
- Bytes b4 0xc90f6d10:  00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
- Object 0xc90f6d20:  31 30 31 39 2e 30 30 35                         1019.005
- Redzone 0xc90f6d28:  00 cc cc cc                                     .
- Padding 0xc90f6d50:  5a 5a 5a 5a 5a 5a 5a 5a                         ZZZZZZZZ
+ Bytes b4 (0xc90f6d10): 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
+ Object   (0xc90f6d20): 31 30 31 39 2e 30 30 35                         1019.005
+ Redzone  (0xc90f6d28): 00 cc cc cc                                     .
+ Padding  (0xc90f6d50): 5a 5a 5a 5a 5a 5a 5a 5a                         ZZZZZZZZ
 
    [<c010523d>] dump_trace+0x63/0x1eb
    [<c01053df>] show_trace_log_lvl+0x1a/0x2f
index 183cc61..cc375fd 100644 (file)
@@ -3877,6 +3877,7 @@ L:        linux-btrfs@vger.kernel.org
 S:     Maintained
 W:     http://btrfs.wiki.kernel.org/
 Q:     http://patchwork.kernel.org/project/linux-btrfs/list/
+C:     irc://irc.libera.chat/btrfs
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git
 F:     Documentation/filesystems/btrfs.rst
 F:     fs/btrfs/
@@ -6947,6 +6948,7 @@ F:        net/core/failover.c
 FANOTIFY
 M:     Jan Kara <jack@suse.cz>
 R:     Amir Goldstein <amir73il@gmail.com>
+R:     Matthew Bobrowski <repnop@google.com>
 L:     linux-fsdevel@vger.kernel.org
 S:     Maintained
 F:     fs/notify/fanotify/
@@ -14134,6 +14136,7 @@ F:      drivers/pci/controller/pci-v3-semi.c
 PCI ENDPOINT SUBSYSTEM
 M:     Kishon Vijay Abraham I <kishon@ti.com>
 M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+R:     Krzysztof Wilczyński <kw@linux.com>
 L:     linux-pci@vger.kernel.org
 S:     Supported
 F:     Documentation/PCI/endpoint/*
@@ -14182,6 +14185,7 @@ F:      drivers/pci/controller/pci-xgene-msi.c
 PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
 M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 R:     Rob Herring <robh@kernel.org>
+R:     Krzysztof Wilczyński <kw@linux.com>
 L:     linux-pci@vger.kernel.org
 S:     Supported
 Q:     http://patchwork.ozlabs.org/project/linux-pci/list/
@@ -14341,10 +14345,12 @@ PER-CPU MEMORY ALLOCATOR
 M:     Dennis Zhou <dennis@kernel.org>
 M:     Tejun Heo <tj@kernel.org>
 M:     Christoph Lameter <cl@linux.com>
+L:     linux-mm@kvack.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu.git
 F:     arch/*/include/asm/percpu.h
 F:     include/linux/percpu*.h
+F:     lib/percpu*.c
 F:     mm/percpu*.c
 
 PER-TASK DELAY ACCOUNTING
@@ -15587,6 +15593,13 @@ F:     include/linux/rpmsg/
 F:     include/uapi/linux/rpmsg.h
 F:     samples/rpmsg/
 
+REMOTE PROCESSOR MESSAGING (RPMSG) WWAN CONTROL DRIVER
+M:     Stephan Gerhold <stephan@gerhold.net>
+L:     netdev@vger.kernel.org
+L:     linux-remoteproc@vger.kernel.org
+S:     Maintained
+F:     drivers/net/wwan/rpmsg_wwan_ctrl.c
+
 RENESAS CLOCK DRIVERS
 M:     Geert Uytterhoeven <geert+renesas@glider.be>
 L:     linux-renesas-soc@vger.kernel.org
@@ -16571,6 +16584,7 @@ F:      drivers/misc/sgi-xp/
 
 SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
 M:     Karsten Graul <kgraul@linux.ibm.com>
+M:     Guvenc Gulce <guvenc@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -17694,7 +17708,6 @@ R:      Mika Westerberg <mika.westerberg@linux.intel.com>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     drivers/i2c/busses/i2c-designware-*
-F:     include/linux/platform_data/i2c-designware.h
 
 SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
 M:     Jaehoon Chung <jh80.chung@samsung.com>
@@ -18886,6 +18899,13 @@ S:     Maintained
 F:     drivers/usb/host/isp116x*
 F:     include/linux/usb/isp116x.h
 
+USB ISP1760 DRIVER
+M:     Rui Miguel Silva <rui.silva@linaro.org>
+L:     linux-usb@vger.kernel.org
+S:     Maintained
+F:     drivers/usb/isp1760/*
+F:     Documentation/devicetree/bindings/usb/nxp,isp1760.yaml
+
 USB LAN78XX ETHERNET DRIVER
 M:     Woojung Huh <woojung.huh@microchip.com>
 M:     UNGLinuxDriver@microchip.com
@@ -19783,6 +19803,16 @@ F:     Documentation/core-api/workqueue.rst
 F:     include/linux/workqueue.h
 F:     kernel/workqueue.c
 
+WWAN DRIVERS
+M:     Loic Poulain <loic.poulain@linaro.org>
+M:     Sergey Ryazanov <ryazanov.s.a@gmail.com>
+R:     Johannes Berg <johannes@sipsolutions.net>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     drivers/net/wwan/
+F:     include/linux/wwan.h
+F:     include/uapi/linux/wwan.h
+
 X-POWERS AXP288 PMIC DRIVERS
 M:     Hans de Goede <hdegoede@redhat.com>
 S:     Maintained
@@ -20030,6 +20060,7 @@ F:      arch/x86/xen/*swiotlb*
 F:     drivers/xen/*swiotlb*
 
 XFS FILESYSTEM
+C:     irc://irc.oftc.net/xfs
 M:     Darrick J. Wong <djwong@kernel.org>
 M:     linux-xfs@vger.kernel.org
 L:     linux-xfs@vger.kernel.org
index e446835..2d7a8df 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc6
 NAME = Frozen Wasteland
 
 # *DOCUMENTATION*
@@ -928,6 +928,14 @@ CC_FLAGS_LTO       += -fvisibility=hidden
 
 # Limit inlining across translation units to reduce binary size
 KBUILD_LDFLAGS += -mllvm -import-instr-limit=5
+
+# Check for frame size exceeding threshold during prolog/epilog insertion
+# when using lld < 13.0.0.
+ifneq ($(CONFIG_FRAME_WARN),0)
+ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0)
+KBUILD_LDFLAGS += -plugin-opt=-warn-stack-size=$(CONFIG_FRAME_WARN)
+endif
+endif
 endif
 
 ifdef CONFIG_LTO
index 5742035..6b3daba 100644 (file)
 #define SO_PREFER_BUSY_POLL    69
 #define SO_BUSY_POLL_BUDGET    70
 
+#define SO_NETNS_COOKIE                71
+
 #if !defined(__KERNEL__)
 
 #if __BITS_PER_LONG == 64
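
For context, SO_NETNS_COOKIE (added here) is a read-only socket option that
returns the 64-bit cookie of the socket's network namespace via getsockopt().
A hedged sketch of querying it; the fallback define mirrors the value in this
header and may differ on other architectures, so include the proper uapi
header in real code:

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/socket.h>

    #ifndef SO_NETNS_COOKIE
    #define SO_NETNS_COOKIE 71   /* value from this patch (alpha uapi header) */
    #endif

    int main(void)
    {
            uint64_t cookie = 0;
            socklen_t len = sizeof(cookie);
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0 ||
                getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE, &cookie, &len) < 0) {
                    perror("SO_NETNS_COOKIE");
                    return 1;
            }
            printf("netns cookie: %llu\n", (unsigned long long)cookie);
            return 0;
    }
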
index 95f8a43..7a5449d 100644 (file)
@@ -18,6 +18,7 @@
  */
 struct sigcontext {
        struct user_regs_struct regs;
+       struct user_regs_arcv2 v2abi;
 };
 
 #endif /* _ASM_ARC_SIGCONTEXT_H */
index b3ccb9e..cb2f885 100644 (file)
@@ -61,6 +61,41 @@ struct rt_sigframe {
        unsigned int sigret_magic;
 };
 
+static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+{
+       int err = 0;
+#ifndef CONFIG_ISA_ARCOMPACT
+       struct user_regs_arcv2 v2abi;
+
+       v2abi.r30 = regs->r30;
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+       v2abi.r58 = regs->r58;
+       v2abi.r59 = regs->r59;
+#else
+       v2abi.r58 = v2abi.r59 = 0;
+#endif
+       err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
+#endif
+       return err;
+}
+
+static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+{
+       int err = 0;
+#ifndef CONFIG_ISA_ARCOMPACT
+       struct user_regs_arcv2 v2abi;
+
+       err = __copy_from_user(&v2abi, &mctx->v2abi, sizeof(v2abi));
+
+       regs->r30 = v2abi.r30;
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+       regs->r58 = v2abi.r58;
+       regs->r59 = v2abi.r59;
+#endif
+#endif
+       return err;
+}
+
 static int
 stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
               sigset_t *set)
@@ -94,6 +129,10 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
 
        err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), &uregs.scratch,
                             sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+       if (is_isa_arcv2())
+               err |= save_arcv2_regs(&(sf->uc.uc_mcontext), regs);
+
        err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
 
        return err ? -EFAULT : 0;
@@ -109,6 +148,10 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
        err |= __copy_from_user(&uregs.scratch,
                                &(sf->uc.uc_mcontext.regs.scratch),
                                sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+       if (is_isa_arcv2())
+               err |= restore_arcv2_regs(&(sf->uc.uc_mcontext), regs);
+
        if (err)
                return -EFAULT;
 
index 33ce59d..e2146a8 100644 (file)
@@ -57,7 +57,6 @@ SECTIONS
        .init.ramfs : { INIT_RAM_FS }
 
        . = ALIGN(PAGE_SIZE);
-       _stext = .;
 
        HEAD_TEXT_SECTION
        INIT_TEXT_SECTION(L1_CACHE_BYTES)
@@ -83,6 +82,7 @@ SECTIONS
 
        .text : {
                _text = .;
+               _stext = .;
                TEXT_TEXT
                SCHED_TEXT
                CPUIDLE_TEXT
index 7d2c725..9148a01 100644 (file)
        phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
        phy-reset-duration = <20>;
        phy-supply = <&sw2_reg>;
-       phy-handle = <&ethphy0>;
        status = "okay";
 
+       fixed-link {
+               speed = <1000>;
+               full-duplex;
+       };
+
        mdio {
                #address-cells = <1>;
                #size-cells = <0>;
index 236fc20..d0768ae 100644 (file)
        vin-supply = <&sw1_reg>;
 };
 
+&reg_pu {
+       vin-supply = <&sw1_reg>;
+};
+
+&reg_vdd1p1 {
+       vin-supply = <&sw2_reg>;
+};
+
+&reg_vdd2p5 {
+       vin-supply = <&sw2_reg>;
+};
+
 &uart1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_uart1>;
index 828cf3e..c4e146f 100644 (file)
                compatible = "nxp,pca8574";
                reg = <0x3a>;
                gpio-controller;
-               #gpio-cells = <1>;
+               #gpio-cells = <2>;
        };
 };
 
index 5339210..dd8003b 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc1>;
        keep-power-in-suspend;
-       tuning-step = <2>;
+       fsl,tuning-step = <2>;
        vmmc-supply = <&reg_3p3v>;
        no-1-8-v;
        broken-cd;
index e57da0d..e519897 100644 (file)
        pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
        cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
        bus-width = <4>;
-       tuning-step = <2>;
+       fsl,tuning-step = <2>;
        vmmc-supply = <&reg_3p3v>;
        wakeup-source;
        no-1-8-v;
index 0d67ed6..bc4ffa7 100644 (file)
@@ -7,9 +7,11 @@
 #ifdef CONFIG_CPU_IDLE
 extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
                struct cpuidle_driver *drv, int index);
+#define __cpuidle_method_section __used __section("__cpuidle_method_of_table")
 #else
 static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
                struct cpuidle_driver *drv, int index) { return -ENODEV; }
+#define __cpuidle_method_section __maybe_unused /* drop silently */
 #endif
 
 /* Common ARM WFI state */
@@ -42,8 +44,7 @@ struct of_cpuidle_method {
 
 #define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops)                 \
        static const struct of_cpuidle_method __cpuidle_method_of_table_##name \
-       __used __section("__cpuidle_method_of_table")                   \
-       = { .method = _method, .ops = _ops }
+       __cpuidle_method_section = { .method = _method, .ops = _ops }
 
 extern int arm_cpuidle_suspend(int index);
 
index 020e6de..237e8aa 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/suspend.h>
 #include <linux/io.h>
 
+#include "common.h"
 #include "hardware.h"
 
 static int mx27_suspend_enter(suspend_state_t state)
index 2ee527c..1026a81 100644 (file)
@@ -458,20 +458,6 @@ static struct gpiod_lookup_table leds_gpio_table = {
 
 #ifdef CONFIG_LEDS_TRIGGERS
 DEFINE_LED_TRIGGER(ams_delta_camera_led_trigger);
-
-static int ams_delta_camera_power(struct device *dev, int power)
-{
-       /*
-        * turn on camera LED
-        */
-       if (power)
-               led_trigger_event(ams_delta_camera_led_trigger, LED_FULL);
-       else
-               led_trigger_event(ams_delta_camera_led_trigger, LED_OFF);
-       return 0;
-}
-#else
-#define ams_delta_camera_power NULL
 #endif
 
 static struct platform_device ams_delta_audio_device = {
index c40cf5e..977b0b7 100644 (file)
@@ -320,7 +320,7 @@ static int tps_setup(struct i2c_client *client, void *context)
 {
        if (!IS_BUILTIN(CONFIG_TPS65010))
                return -ENOSYS;
-       
+
        tps65010_config_vregs1(TPS_LDO2_ENABLE | TPS_VLDO2_3_0V |
                                TPS_LDO1_ENABLE | TPS_VLDO1_3_0V);
 
@@ -394,6 +394,8 @@ static void __init h2_init(void)
        BUG_ON(gpio_request(H2_NAND_RB_GPIO_PIN, "NAND ready") < 0);
        gpio_direction_input(H2_NAND_RB_GPIO_PIN);
 
+       gpiod_add_lookup_table(&isp1301_gpiod_table);
+
        omap_cfg_reg(L3_1610_FLASH_CS2B_OE);
        omap_cfg_reg(M8_1610_FLASH_CS2B_WE);
 
index 2c1e2b3..a745d64 100644 (file)
@@ -655,9 +655,13 @@ static int __init omap_pm_init(void)
                irq = INT_7XX_WAKE_UP_REQ;
        else if (cpu_is_omap16xx())
                irq = INT_1610_WAKE_UP_REQ;
-       if (request_irq(irq, omap_wakeup_interrupt, 0, "peripheral wakeup",
-                       NULL))
-               pr_err("Failed to request irq %d (peripheral wakeup)\n", irq);
+       else
+               irq = -1;
+
+       if (irq >= 0) {
+               if (request_irq(irq, omap_wakeup_interrupt, 0, "peripheral wakeup", NULL))
+                       pr_err("Failed to request irq %d (peripheral wakeup)\n", irq);
+       }
 
        /* Program new power ramp-up time
         * (0 for most boards since we don't lower voltage when in deep sleep)
index 418a61e..5e86145 100644 (file)
@@ -322,6 +322,7 @@ static int n8x0_mmc_get_cover_state(struct device *dev, int slot)
 
 static void n8x0_mmc_callback(void *data, u8 card_mask)
 {
+#ifdef CONFIG_MMC_OMAP
        int bit, *openp, index;
 
        if (board_is_n800()) {
@@ -339,7 +340,6 @@ static void n8x0_mmc_callback(void *data, u8 card_mask)
        else
                *openp = 0;
 
-#ifdef CONFIG_MMC_OMAP
        omap_mmc_notify_cover_event(mmc_device, index, *openp);
 #else
        pr_warn("MMC: notify cover event not available\n");
index 6409b47..7336c1f 100644 (file)
@@ -165,6 +165,7 @@ config ARCH_MEDIATEK
 
 config ARCH_MESON
        bool "Amlogic Platforms"
+       select COMMON_CLK
        select MESON_IRQ_GPIO
        help
          This enables support for the arm64 based Amlogic SoCs
index 6c309b9..e8d3127 100644 (file)
@@ -46,7 +46,8 @@
                        eee-broken-100tx;
                        qca,clk-out-frequency = <125000000>;
                        qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
-                       vddio-supply = <&vddh>;
+                       qca,keep-pll-enabled;
+                       vddio-supply = <&vddio>;
 
                        vddio: vddio-regulator {
                                regulator-name = "VDDIO";
index df212ed..e65d1c4 100644 (file)
                        reg = <0x4>;
                        eee-broken-1000t;
                        eee-broken-100tx;
-
                        qca,clk-out-frequency = <125000000>;
                        qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
-
-                       vddio-supply = <&vddh>;
+                       qca,keep-pll-enabled;
+                       vddio-supply = <&vddio>;
 
                        vddio: vddio-regulator {
                                regulator-name = "VDDIO";
index eca06a0..a30249e 100644 (file)
                ddr: memory-controller@1080000 {
                        compatible = "fsl,qoriq-memory-controller";
                        reg = <0x0 0x1080000 0x0 0x1000>;
-                       interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
-                       big-endian;
+                       interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+                       little-endian;
                };
 
                dcfg: syscon@1e00000 {
index 631e01c..be1e7d6 100644 (file)
                pinctrl-0 = <&pinctrl_codec2>;
                reg = <0x18>;
                #sound-dai-cells = <0>;
-               HPVDD-supply = <&reg_3p3v>;
-               SPRVDD-supply = <&reg_3p3v>;
-               SPLVDD-supply = <&reg_3p3v>;
-               AVDD-supply = <&reg_3p3v>;
-               IOVDD-supply = <&reg_3p3v>;
+               HPVDD-supply = <&reg_gen_3p3>;
+               SPRVDD-supply = <&reg_gen_3p3>;
+               SPLVDD-supply = <&reg_gen_3p3>;
+               AVDD-supply = <&reg_gen_3p3>;
+               IOVDD-supply = <&reg_gen_3p3>;
                DVDD-supply = <&vgen4_reg>;
                reset-gpios = <&gpio3 4 GPIO_ACTIVE_HIGH>;
        };
index 4dc8383..a08a568 100644 (file)
@@ -45,8 +45,8 @@
        reg_12p0_main: regulator-12p0-main {
                compatible = "regulator-fixed";
                regulator-name = "12V_MAIN";
-               regulator-min-microvolt = <5000000>;
-               regulator-max-microvolt = <5000000>;
+               regulator-min-microvolt = <12000000>;
+               regulator-max-microvolt = <12000000>;
                regulator-always-on;
        };
 
                regulator-always-on;
        };
 
-       reg_3p3v: regulator-3p3v {
-               compatible = "regulator-fixed";
-               vin-supply = <&reg_3p3_main>;
-               regulator-name = "GEN_3V3";
-               regulator-min-microvolt = <3300000>;
-               regulator-max-microvolt = <3300000>;
-               regulator-always-on;
-       };
-
        reg_usdhc2_vmmc: regulator-vsd-3v3 {
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_reg_usdhc2>;
                pinctrl-0 = <&pinctrl_codec1>;
                reg = <0x18>;
                #sound-dai-cells = <0>;
-               HPVDD-supply = <&reg_3p3v>;
-               SPRVDD-supply = <&reg_3p3v>;
-               SPLVDD-supply = <&reg_3p3v>;
-               AVDD-supply = <&reg_3p3v>;
-               IOVDD-supply = <&reg_3p3v>;
+               HPVDD-supply = <&reg_gen_3p3>;
+               SPRVDD-supply = <&reg_gen_3p3>;
+               SPLVDD-supply = <&reg_gen_3p3>;
+               AVDD-supply = <&reg_gen_3p3>;
+               IOVDD-supply = <&reg_gen_3p3>;
                DVDD-supply = <&vgen4_reg>;
                reset-gpios = <&gpio3 3 GPIO_ACTIVE_LOW>;
        };
index d64621d..ad07fff 100644 (file)
                        };
                };
 
-               reset@611010008 {
-                       compatible = "microchip,sparx5-chip-reset";
+               reset: reset-controller@611010008 {
+                       compatible = "microchip,sparx5-switch-reset";
                        reg = <0x6 0x11010008 0x4>;
+                       reg-names = "gcb";
+                       #reset-cells = <1>;
+                       cpu-syscon = <&cpu_ctrl>;
                };
 
                uart0: serial@600100000 {
                                        "GPIO_46", "GPIO_47";
                                function = "emmc";
                        };
+
+                       miim1_pins: miim1-pins {
+                               pins = "GPIO_56", "GPIO_57";
+                               function = "miim";
+                       };
+
+                       miim2_pins: miim2-pins {
+                               pins = "GPIO_58", "GPIO_59";
+                               function = "miim";
+                       };
+
+                       miim3_pins: miim3-pins {
+                               pins = "GPIO_52", "GPIO_53";
+                               function = "miim";
+                       };
                };
 
                sgpio0: gpio@61101036c {
                        clocks = <&sys_clk>;
                        pinctrl-0 = <&sgpio0_pins>;
                        pinctrl-names = "default";
+                       resets = <&reset 0>;
+                       reset-names = "switch";
                        reg = <0x6 0x1101036c 0x100>;
                        sgpio_in0: gpio@0 {
                                compatible = "microchip,sparx5-sgpio-bank";
                                gpio-controller;
                                #gpio-cells = <3>;
                                ngpios = <96>;
+                               interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupt-controller;
+                               #interrupt-cells = <3>;
                        };
                        sgpio_out0: gpio@1 {
                                compatible = "microchip,sparx5-sgpio-bank";
                        clocks = <&sys_clk>;
                        pinctrl-0 = <&sgpio1_pins>;
                        pinctrl-names = "default";
+                       resets = <&reset 0>;
+                       reset-names = "switch";
                        reg = <0x6 0x11010484 0x100>;
                        sgpio_in1: gpio@0 {
                                compatible = "microchip,sparx5-sgpio-bank";
                                gpio-controller;
                                #gpio-cells = <3>;
                                ngpios = <96>;
+                               interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupt-controller;
+                               #interrupt-cells = <3>;
                        };
                        sgpio_out1: gpio@1 {
                                compatible = "microchip,sparx5-sgpio-bank";
                        clocks = <&sys_clk>;
                        pinctrl-0 = <&sgpio2_pins>;
                        pinctrl-names = "default";
+                       resets = <&reset 0>;
+                       reset-names = "switch";
                        reg = <0x6 0x1101059c 0x100>;
                        sgpio_in2: gpio@0 {
                                reg = <0>;
                                gpio-controller;
                                #gpio-cells = <3>;
                                ngpios = <96>;
+                               interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupt-controller;
+                               #interrupt-cells = <3>;
                        };
                        sgpio_out2: gpio@1 {
                                compatible = "microchip,sparx5-sgpio-bank";
                        #thermal-sensor-cells = <0>;
                        clocks = <&ahb_clk>;
                };
+
+               mdio0: mdio@6110102b0 {
+                       compatible = "mscc,ocelot-miim";
+                       status = "disabled";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x6 0x110102b0 0x24>;
+               };
+
+               mdio1: mdio@6110102d4 {
+                       compatible = "mscc,ocelot-miim";
+                       status = "disabled";
+                       pinctrl-0 = <&miim1_pins>;
+                       pinctrl-names = "default";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x6 0x110102d4 0x24>;
+               };
+
+               mdio2: mdio@6110102f8 {
+                       compatible = "mscc,ocelot-miim";
+                       status = "disabled";
+                       pinctrl-0 = <&miim2_pins>;
+                       pinctrl-names = "default";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x6 0x110102f8 0x24>;
+               };
+
+               mdio3: mdio@61101031c {
+                       compatible = "mscc,ocelot-miim";
+                       status = "disabled";
+                       pinctrl-0 = <&miim3_pins>;
+                       pinctrl-names = "default";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x6 0x1101031c 0x24>;
+               };
+
+               serdes: serdes@10808000 {
+                       compatible = "microchip,sparx5-serdes";
+                       #phy-cells = <1>;
+                       clocks = <&sys_clk>;
+                       reg = <0x6 0x10808000 0x5d0000>;
+               };
+
+               switch: switch@600000000 {
+                       compatible = "microchip,sparx5-switch";
+                       reg =   <0x6 0 0x401000>,
+                               <0x6 0x10004000 0x7fc000>,
+                               <0x6 0x11010000 0xaf0000>;
+                       reg-names = "cpu", "devices", "gcb";
+                       interrupt-names = "xtr";
+                       interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+                       resets = <&reset 0>;
+                       reset-names = "switch";
+               };
        };
 };
index f0c9151..33faf1f 100644 (file)
@@ -7,30 +7,6 @@
 #include "sparx5_pcb_common.dtsi"
 
 /{
-       aliases {
-           i2c0   = &i2c0;
-           i2c100 = &i2c100;
-           i2c101 = &i2c101;
-           i2c102 = &i2c102;
-           i2c103 = &i2c103;
-           i2c104 = &i2c104;
-           i2c105 = &i2c105;
-           i2c106 = &i2c106;
-           i2c107 = &i2c107;
-           i2c108 = &i2c108;
-           i2c109 = &i2c109;
-           i2c110 = &i2c110;
-           i2c111 = &i2c111;
-           i2c112 = &i2c112;
-           i2c113 = &i2c113;
-           i2c114 = &i2c114;
-           i2c115 = &i2c115;
-           i2c116 = &i2c116;
-           i2c117 = &i2c117;
-           i2c118 = &i2c118;
-           i2c119 = &i2c119;
-       };
-
        gpio-restart {
                compatible = "gpio-restart";
                gpios = <&gpio 37 GPIO_ACTIVE_LOW>;
 
 &spi0 {
        status = "okay";
-       spi@0 {
-               compatible = "spi-mux";
-               mux-controls = <&mux>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-               reg = <0>;      /* CS0 */
-               spi-flash@9 {
-                       compatible = "jedec,spi-nor";
-                       spi-max-frequency = <8000000>;
-                       reg = <0x9>;    /* SPI */
-               };
+       spi-flash@0 {
+               compatible = "jedec,spi-nor";
+               spi-max-frequency = <8000000>;
+               reg = <0>;
        };
 };
 
        };
 };
 
+&sgpio0 {
+       status = "okay";
+       microchip,sgpio-port-ranges = <8 15>;
+       gpio@0 {
+               ngpios = <64>;
+       };
+       gpio@1 {
+               ngpios = <64>;
+       };
+};
+
+&sgpio1 {
+       status = "okay";
+       microchip,sgpio-port-ranges = <24 31>;
+       gpio@0 {
+               ngpios = <64>;
+       };
+       gpio@1 {
+               ngpios = <64>;
+       };
+};
+
+&sgpio2 {
+       status = "okay";
+       microchip,sgpio-port-ranges = <0 0>, <11 31>;
+};
+
 &gpio {
        i2cmux_pins_i: i2cmux-pins-i {
               pins = "GPIO_16", "GPIO_17", "GPIO_18", "GPIO_19",
 
 &i2c0_imux {
        pinctrl-names =
-               "i2c100", "i2c101", "i2c102", "i2c103",
-               "i2c104", "i2c105", "i2c106", "i2c107",
-               "i2c108", "i2c109", "i2c110", "i2c111", "idle";
+               "i2c_sfp1", "i2c_sfp2", "i2c_sfp3", "i2c_sfp4",
+               "i2c_sfp5", "i2c_sfp6", "i2c_sfp7", "i2c_sfp8",
+               "i2c_sfp9", "i2c_sfp10", "i2c_sfp11", "i2c_sfp12", "idle";
        pinctrl-0 = <&i2cmux_0>;
        pinctrl-1 = <&i2cmux_1>;
        pinctrl-2 = <&i2cmux_2>;
        pinctrl-10 = <&i2cmux_10>;
        pinctrl-11 = <&i2cmux_11>;
        pinctrl-12 = <&i2cmux_pins_i>;
-       i2c100: i2c_sfp1 {
+       i2c_sfp1: i2c_sfp1 {
                reg = <0x0>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c101: i2c_sfp2 {
+       i2c_sfp2: i2c_sfp2 {
                reg = <0x1>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c102: i2c_sfp3 {
+       i2c_sfp3: i2c_sfp3 {
                reg = <0x2>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c103: i2c_sfp4 {
+       i2c_sfp4: i2c_sfp4 {
                reg = <0x3>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c104: i2c_sfp5 {
+       i2c_sfp5: i2c_sfp5 {
                reg = <0x4>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c105: i2c_sfp6 {
+       i2c_sfp6: i2c_sfp6 {
                reg = <0x5>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c106: i2c_sfp7 {
+       i2c_sfp7: i2c_sfp7 {
                reg = <0x6>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c107: i2c_sfp8 {
+       i2c_sfp8: i2c_sfp8 {
                reg = <0x7>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c108: i2c_sfp9 {
+       i2c_sfp9: i2c_sfp9 {
                reg = <0x8>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c109: i2c_sfp10 {
+       i2c_sfp10: i2c_sfp10 {
                reg = <0x9>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c110: i2c_sfp11 {
+       i2c_sfp11: i2c_sfp11 {
                reg = <0xa>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c111: i2c_sfp12 {
+       i2c_sfp12: i2c_sfp12 {
                reg = <0xb>;
                #address-cells = <1>;
                #size-cells = <0>;
                     &gpio 61 GPIO_ACTIVE_HIGH
                     &gpio 54 GPIO_ACTIVE_HIGH>;
        idle-state = <0x8>;
-       i2c112: i2c_sfp13 {
+       i2c_sfp13: i2c_sfp13 {
                reg = <0x0>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c113: i2c_sfp14 {
+       i2c_sfp14: i2c_sfp14 {
                reg = <0x1>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c114: i2c_sfp15 {
+       i2c_sfp15: i2c_sfp15 {
                reg = <0x2>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c115: i2c_sfp16 {
+       i2c_sfp16: i2c_sfp16 {
                reg = <0x3>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c116: i2c_sfp17 {
+       i2c_sfp17: i2c_sfp17 {
                reg = <0x4>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c117: i2c_sfp18 {
+       i2c_sfp18: i2c_sfp18 {
                reg = <0x5>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c118: i2c_sfp19 {
+       i2c_sfp19: i2c_sfp19 {
                reg = <0x6>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c119: i2c_sfp20 {
+       i2c_sfp20: i2c_sfp20 {
                reg = <0x7>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
 };
+
+&mdio3 {
+       status = "okay";
+       phy64: ethernet-phy@64 {
+               reg = <28>;
+       };
+};
+
+&axi {
+       sfp_eth12: sfp-eth12 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp1>;
+               tx-disable-gpios = <&sgpio_out2 11 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 11 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 11 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 12 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth13: sfp-eth13 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp2>;
+               tx-disable-gpios = <&sgpio_out2 12 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 12 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 12 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 13 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth14: sfp-eth14 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp3>;
+               tx-disable-gpios = <&sgpio_out2 13 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 13 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 13 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 14 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth15: sfp-eth15 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp4>;
+               tx-disable-gpios = <&sgpio_out2 14 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 14 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 14 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 15 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth48: sfp-eth48 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp5>;
+               tx-disable-gpios = <&sgpio_out2 15 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 15 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 15 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 16 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth49: sfp-eth49 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp6>;
+               tx-disable-gpios = <&sgpio_out2 16 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 16 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 16 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 17 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth50: sfp-eth50 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp7>;
+               tx-disable-gpios = <&sgpio_out2 17 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 17 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 17 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 18 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth51: sfp-eth51 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp8>;
+               tx-disable-gpios = <&sgpio_out2 18 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 18 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 18 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 19 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth52: sfp-eth52 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp9>;
+               tx-disable-gpios = <&sgpio_out2 19 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 19 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 19 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 20 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth53: sfp-eth53 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp10>;
+               tx-disable-gpios = <&sgpio_out2 20 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 20 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 20 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 21 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth54: sfp-eth54 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp11>;
+               tx-disable-gpios = <&sgpio_out2 21 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 21 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 21 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 22 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth55: sfp-eth55 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp12>;
+               tx-disable-gpios = <&sgpio_out2 22 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 22 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 22 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 23 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth56: sfp-eth56 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp13>;
+               tx-disable-gpios = <&sgpio_out2 23 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 23 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 23 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 24 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth57: sfp-eth57 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp14>;
+               tx-disable-gpios = <&sgpio_out2 24 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 24 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 24 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 25 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth58: sfp-eth58 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp15>;
+               tx-disable-gpios = <&sgpio_out2 25 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 25 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 25 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 26 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth59: sfp-eth59 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp16>;
+               tx-disable-gpios = <&sgpio_out2 26 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 26 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 26 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 27 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth60: sfp-eth60 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp17>;
+               tx-disable-gpios = <&sgpio_out2 27 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 27 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 27 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 28 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth61: sfp-eth61 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp18>;
+               tx-disable-gpios = <&sgpio_out2 28 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 28 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 28 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 29 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth62: sfp-eth62 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp19>;
+               tx-disable-gpios = <&sgpio_out2 29 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 29 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 29 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 30 0 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth63: sfp-eth63 {
+               compatible       = "sff,sfp";
+               i2c-bus          = <&i2c_sfp20>;
+               tx-disable-gpios = <&sgpio_out2 30 1 GPIO_ACTIVE_LOW>;
+               los-gpios        = <&sgpio_in2 30 1 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios   = <&sgpio_in2 30 2 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios   = <&sgpio_in2 31 0 GPIO_ACTIVE_HIGH>;
+       };
+};
+
+&switch {
+       ethernet-ports {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               /* 10G SFPs */
+               port12: port@12 {
+                       reg = <12>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 13>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth12>;
+                       microchip,sd-sgpio = <301>;
+                       managed = "in-band-status";
+               };
+               port13: port@13 {
+                       reg = <13>;
+                       /* Example: CU SFP, 1G speed */
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 14>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth13>;
+                       microchip,sd-sgpio = <305>;
+                       managed = "in-band-status";
+               };
+               port14: port@14 {
+                       reg = <14>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 15>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth14>;
+                       microchip,sd-sgpio = <309>;
+                       managed = "in-band-status";
+               };
+               port15: port@15 {
+                       reg = <15>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 16>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth15>;
+                       microchip,sd-sgpio = <313>;
+                       managed = "in-band-status";
+               };
+               port48: port@48 {
+                       reg = <48>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 17>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth48>;
+                       microchip,sd-sgpio = <317>;
+                       managed = "in-band-status";
+               };
+               port49: port@49 {
+                       reg = <49>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 18>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth49>;
+                       microchip,sd-sgpio = <321>;
+                       managed = "in-band-status";
+               };
+               port50: port@50 {
+                       reg = <50>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 19>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth50>;
+                       microchip,sd-sgpio = <325>;
+                       managed = "in-band-status";
+               };
+               port51: port@51 {
+                       reg = <51>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 20>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth51>;
+                       microchip,sd-sgpio = <329>;
+                       managed = "in-band-status";
+               };
+               port52: port@52 {
+                       reg = <52>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 21>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth52>;
+                       microchip,sd-sgpio = <333>;
+                       managed = "in-band-status";
+               };
+               port53: port@53 {
+                       reg = <53>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 22>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth53>;
+                       microchip,sd-sgpio = <337>;
+                       managed = "in-band-status";
+               };
+               port54: port@54 {
+                       reg = <54>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 23>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth54>;
+                       microchip,sd-sgpio = <341>;
+                       managed = "in-band-status";
+               };
+               port55: port@55 {
+                       reg = <55>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 24>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth55>;
+                       microchip,sd-sgpio = <345>;
+                       managed = "in-band-status";
+               };
+               /* 25G SFPs */
+               port56: port@56 {
+                       reg = <56>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 25>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth56>;
+                       microchip,sd-sgpio = <349>;
+                       managed = "in-band-status";
+               };
+               port57: port@57 {
+                       reg = <57>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 26>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth57>;
+                       microchip,sd-sgpio = <353>;
+                       managed = "in-band-status";
+               };
+               port58: port@58 {
+                       reg = <58>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 27>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth58>;
+                       microchip,sd-sgpio = <357>;
+                       managed = "in-band-status";
+               };
+               port59: port@59 {
+                       reg = <59>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 28>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth59>;
+                       microchip,sd-sgpio = <361>;
+                       managed = "in-band-status";
+               };
+               port60: port@60 {
+                       reg = <60>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 29>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth60>;
+                       microchip,sd-sgpio = <365>;
+                       managed = "in-band-status";
+               };
+               port61: port@61 {
+                       reg = <61>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 30>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth61>;
+                       microchip,sd-sgpio = <369>;
+                       managed = "in-band-status";
+               };
+               port62: port@62 {
+                       reg = <62>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 31>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth62>;
+                       microchip,sd-sgpio = <373>;
+                       managed = "in-band-status";
+               };
+               port63: port@63 {
+                       reg = <63>;
+                       microchip,bandwidth = <10000>;
+                       phys = <&serdes 32>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth63>;
+                       microchip,sd-sgpio = <377>;
+                       managed = "in-band-status";
+               };
+               /* Finally the Management interface */
+               port64: port@64 {
+                       reg = <64>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 0>;
+                       phy-handle = <&phy64>;
+                       phy-mode = "sgmii";
+               };
+       };
+};
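
Every port node in this board file follows the same shape: "microchip,bandwidth" caps the port rate in Mbps, "phys" selects the serdes lane, and "sfp" plus managed = "in-band-status" hand link management over to phylink. Below is a minimal sketch of reading these properties with the generic OF helpers; it assumes a platform-driver context, and sparx5_parse_port_node() is a hypothetical name, not the in-tree driver's function.

    #include <linux/device.h>
    #include <linux/of.h>
    #include <linux/of_net.h>
    #include <linux/phy.h>

    /* Sketch only: pull out the per-port properties used in the nodes above. */
    static int sparx5_parse_port_node(struct device *dev, struct device_node *np)
    {
            struct device_node *sfp_np;
            phy_interface_t mode;
            u32 portno, bw;
            int err;

            err = of_property_read_u32(np, "reg", &portno);
            if (err)
                    return err;

            /* Maximum bandwidth in Mbps, e.g. <10000> on the SFP ports */
            err = of_property_read_u32(np, "microchip,bandwidth", &bw);
            if (err)
                    return err;

            /* "10gbase-r", "qsgmii" or "sgmii" in this file */
            err = of_get_phy_mode(np, &mode);
            if (err)
                    return err;

            /* Optional SFP cage reference on the in-band managed ports */
            sfp_np = of_parse_phandle(np, "sfp", 0);
            of_node_put(sfp_np);

            dev_info(dev, "port %u: max %u Mbps\n", portno, bw);
            return 0;
    }
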
index e28c6dd..ef96e6d 100644 (file)
@@ -7,14 +7,6 @@
 #include "sparx5_pcb_common.dtsi"
 
 /{
-       aliases {
-           i2c0   = &i2c0;
-           i2c152 = &i2c152;
-           i2c153 = &i2c153;
-           i2c154 = &i2c154;
-           i2c155 = &i2c155;
-       };
-
        gpio-restart {
                compatible = "gpio-restart";
                gpios = <&gpio 37 GPIO_ACTIVE_LOW>;
 
 &spi0 {
        status = "okay";
-       spi@0 {
-               compatible = "spi-mux";
-               mux-controls = <&mux>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-               reg = <0>; /* CS0 */
-               spi-flash@9 {
-                       compatible = "jedec,spi-nor";
-                       spi-max-frequency = <8000000>;
-                       reg = <0x9>; /* SPI */
-               };
+       spi-flash@0 {
+               compatible = "jedec,spi-nor";
+               spi-max-frequency = <8000000>;
+               reg = <0>;
        };
 };
 
        };
 };
 
+&sgpio2 {
+       status = "okay";
+       microchip,sgpio-port-ranges = <0 0>, <16 18>, <28 31>;
+};
+
 &axi {
        i2c0_imux: i2c0-imux@0 {
                compatible = "i2c-mux-pinctrl";
 
 &i2c0_imux {
        pinctrl-names =
-               "i2c152", "i2c153", "i2c154", "i2c155",
+               "i2c_sfp1", "i2c_sfp2", "i2c_sfp3", "i2c_sfp4",
                "idle";
        pinctrl-0 = <&i2cmux_s29>;
        pinctrl-1 = <&i2cmux_s30>;
        pinctrl-2 = <&i2cmux_s31>;
        pinctrl-3 = <&i2cmux_s32>;
        pinctrl-4 = <&i2cmux_pins_i>;
-       i2c152: i2c_sfp1 {
+       i2c_sfp1: i2c_sfp1 {
                reg = <0x0>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c153: i2c_sfp2 {
+       i2c_sfp2: i2c_sfp2 {
                reg = <0x1>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c154: i2c_sfp3 {
+       i2c_sfp3: i2c_sfp3 {
                reg = <0x2>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
-       i2c155: i2c_sfp4 {
+       i2c_sfp4: i2c_sfp4 {
                reg = <0x3>;
                #address-cells = <1>;
                #size-cells = <0>;
        };
 };
+
+&axi {
+       sfp_eth60: sfp-eth60 {
+               compatible         = "sff,sfp";
+               i2c-bus            = <&i2c_sfp1>;
+               tx-disable-gpios   = <&sgpio_out2 28 0 GPIO_ACTIVE_LOW>;
+               rate-select0-gpios = <&sgpio_out2 28 1 GPIO_ACTIVE_HIGH>;
+               los-gpios          = <&sgpio_in2 28 0 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios     = <&sgpio_in2 28 1 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios     = <&sgpio_in2 28 2 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth61: sfp-eth61 {
+               compatible         = "sff,sfp";
+               i2c-bus            = <&i2c_sfp2>;
+               tx-disable-gpios   = <&sgpio_out2 29 0 GPIO_ACTIVE_LOW>;
+               rate-select0-gpios = <&sgpio_out2 29 1 GPIO_ACTIVE_HIGH>;
+               los-gpios          = <&sgpio_in2 29 0 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios     = <&sgpio_in2 29 1 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios     = <&sgpio_in2 29 2 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth62: sfp-eth62 {
+               compatible         = "sff,sfp";
+               i2c-bus            = <&i2c_sfp3>;
+               tx-disable-gpios   = <&sgpio_out2 30 0 GPIO_ACTIVE_LOW>;
+               rate-select0-gpios = <&sgpio_out2 30 1 GPIO_ACTIVE_HIGH>;
+               los-gpios          = <&sgpio_in2 30 0 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios     = <&sgpio_in2 30 1 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios     = <&sgpio_in2 30 2 GPIO_ACTIVE_HIGH>;
+       };
+       sfp_eth63: sfp-eth63 {
+               compatible         = "sff,sfp";
+               i2c-bus            = <&i2c_sfp4>;
+               tx-disable-gpios   = <&sgpio_out2 31 0 GPIO_ACTIVE_LOW>;
+               rate-select0-gpios = <&sgpio_out2 31 1 GPIO_ACTIVE_HIGH>;
+               los-gpios          = <&sgpio_in2 31 0 GPIO_ACTIVE_HIGH>;
+               mod-def0-gpios     = <&sgpio_in2 31 1 GPIO_ACTIVE_LOW>;
+               tx-fault-gpios     = <&sgpio_in2 31 2 GPIO_ACTIVE_HIGH>;
+       };
+};
+
+&mdio0 {
+       status = "ok";
+       phy0: ethernet-phy@0 {
+               reg = <0>;
+       };
+       phy1: ethernet-phy@1 {
+               reg = <1>;
+       };
+       phy2: ethernet-phy@2 {
+               reg = <2>;
+       };
+       phy3: ethernet-phy@3 {
+               reg = <3>;
+       };
+       phy4: ethernet-phy@4 {
+               reg = <4>;
+       };
+       phy5: ethernet-phy@5 {
+               reg = <5>;
+       };
+       phy6: ethernet-phy@6 {
+               reg = <6>;
+       };
+       phy7: ethernet-phy@7 {
+               reg = <7>;
+       };
+       phy8: ethernet-phy@8 {
+               reg = <8>;
+       };
+       phy9: ethernet-phy@9 {
+               reg = <9>;
+       };
+       phy10: ethernet-phy@10 {
+               reg = <10>;
+       };
+       phy11: ethernet-phy@11 {
+               reg = <11>;
+       };
+       phy12: ethernet-phy@12 {
+               reg = <12>;
+       };
+       phy13: ethernet-phy@13 {
+               reg = <13>;
+       };
+       phy14: ethernet-phy@14 {
+               reg = <14>;
+       };
+       phy15: ethernet-phy@15 {
+               reg = <15>;
+       };
+       phy16: ethernet-phy@16 {
+               reg = <16>;
+       };
+       phy17: ethernet-phy@17 {
+               reg = <17>;
+       };
+       phy18: ethernet-phy@18 {
+               reg = <18>;
+       };
+       phy19: ethernet-phy@19 {
+               reg = <19>;
+       };
+       phy20: ethernet-phy@20 {
+               reg = <20>;
+       };
+       phy21: ethernet-phy@21 {
+               reg = <21>;
+       };
+       phy22: ethernet-phy@22 {
+               reg = <22>;
+       };
+       phy23: ethernet-phy@23 {
+               reg = <23>;
+       };
+};
+
+&mdio1 {
+       status = "ok";
+       phy24: ethernet-phy@24 {
+               reg = <0>;
+       };
+       phy25: ethernet-phy@25 {
+               reg = <1>;
+       };
+       phy26: ethernet-phy@26 {
+               reg = <2>;
+       };
+       phy27: ethernet-phy@27 {
+               reg = <3>;
+       };
+       phy28: ethernet-phy@28 {
+               reg = <4>;
+       };
+       phy29: ethernet-phy@29 {
+               reg = <5>;
+       };
+       phy30: ethernet-phy@30 {
+               reg = <6>;
+       };
+       phy31: ethernet-phy@31 {
+               reg = <7>;
+       };
+       phy32: ethernet-phy@32 {
+               reg = <8>;
+       };
+       phy33: ethernet-phy@33 {
+               reg = <9>;
+       };
+       phy34: ethernet-phy@34 {
+               reg = <10>;
+       };
+       phy35: ethernet-phy@35 {
+               reg = <11>;
+       };
+       phy36: ethernet-phy@36 {
+               reg = <12>;
+       };
+       phy37: ethernet-phy@37 {
+               reg = <13>;
+       };
+       phy38: ethernet-phy@38 {
+               reg = <14>;
+       };
+       phy39: ethernet-phy@39 {
+               reg = <15>;
+       };
+       phy40: ethernet-phy@40 {
+               reg = <16>;
+       };
+       phy41: ethernet-phy@41 {
+               reg = <17>;
+       };
+       phy42: ethernet-phy@42 {
+               reg = <18>;
+       };
+       phy43: ethernet-phy@43 {
+               reg = <19>;
+       };
+       phy44: ethernet-phy@44 {
+               reg = <20>;
+       };
+       phy45: ethernet-phy@45 {
+               reg = <21>;
+       };
+       phy46: ethernet-phy@46 {
+               reg = <22>;
+       };
+       phy47: ethernet-phy@47 {
+               reg = <23>;
+       };
+};
+
+&mdio3 {
+       status = "ok";
+       phy64: ethernet-phy@64 {
+               reg = <28>;
+       };
+};
+
+&switch {
+       ethernet-ports {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               port0: port@0 {
+                       reg = <0>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 13>;
+                       phy-handle = <&phy0>;
+                       phy-mode = "qsgmii";
+               };
+               port1: port@1 {
+                       reg = <1>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 13>;
+                       phy-handle = <&phy1>;
+                       phy-mode = "qsgmii";
+               };
+               port2: port@2 {
+                       reg = <2>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 13>;
+                       phy-handle = <&phy2>;
+                       phy-mode = "qsgmii";
+               };
+               port3: port@3 {
+                       reg = <3>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 13>;
+                       phy-handle = <&phy3>;
+                       phy-mode = "qsgmii";
+               };
+               port4: port@4 {
+                       reg = <4>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 14>;
+                       phy-handle = <&phy4>;
+                       phy-mode = "qsgmii";
+               };
+               port5: port@5 {
+                       reg = <5>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 14>;
+                       phy-handle = <&phy5>;
+                       phy-mode = "qsgmii";
+               };
+               port6: port@6 {
+                       reg = <6>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 14>;
+                       phy-handle = <&phy6>;
+                       phy-mode = "qsgmii";
+               };
+               port7: port@7 {
+                       reg = <7>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 14>;
+                       phy-handle = <&phy7>;
+                       phy-mode = "qsgmii";
+               };
+               port8: port@8 {
+                       reg = <8>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 15>;
+                       phy-handle = <&phy8>;
+                       phy-mode = "qsgmii";
+               };
+               port9: port@9 {
+                       reg = <9>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 15>;
+                       phy-handle = <&phy9>;
+                       phy-mode = "qsgmii";
+               };
+               port10: port@10 {
+                       reg = <10>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 15>;
+                       phy-handle = <&phy10>;
+                       phy-mode = "qsgmii";
+               };
+               port11: port@11 {
+                       reg = <11>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 15>;
+                       phy-handle = <&phy11>;
+                       phy-mode = "qsgmii";
+               };
+               port12: port@12 {
+                       reg = <12>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 16>;
+                       phy-handle = <&phy12>;
+                       phy-mode = "qsgmii";
+               };
+               port13: port@13 {
+                       reg = <13>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 16>;
+                       phy-handle = <&phy13>;
+                       phy-mode = "qsgmii";
+               };
+               port14: port@14 {
+                       reg = <14>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 16>;
+                       phy-handle = <&phy14>;
+                       phy-mode = "qsgmii";
+               };
+               port15: port@15 {
+                       reg = <15>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 16>;
+                       phy-handle = <&phy15>;
+                       phy-mode = "qsgmii";
+               };
+               port16: port@16 {
+                       reg = <16>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 17>;
+                       phy-handle = <&phy16>;
+                       phy-mode = "qsgmii";
+               };
+               port17: port@17 {
+                       reg = <17>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 17>;
+                       phy-handle = <&phy17>;
+                       phy-mode = "qsgmii";
+               };
+               port18: port@18 {
+                       reg = <18>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 17>;
+                       phy-handle = <&phy18>;
+                       phy-mode = "qsgmii";
+               };
+               port19: port@19 {
+                       reg = <19>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 17>;
+                       phy-handle = <&phy19>;
+                       phy-mode = "qsgmii";
+               };
+               port20: port@20 {
+                       reg = <20>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 18>;
+                       phy-handle = <&phy20>;
+                       phy-mode = "qsgmii";
+               };
+               port21: port@21 {
+                       reg = <21>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 18>;
+                       phy-handle = <&phy21>;
+                       phy-mode = "qsgmii";
+               };
+               port22: port@22 {
+                       reg = <22>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 18>;
+                       phy-handle = <&phy22>;
+                       phy-mode = "qsgmii";
+               };
+               port23: port@23 {
+                       reg = <23>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 18>;
+                       phy-handle = <&phy23>;
+                       phy-mode = "qsgmii";
+               };
+               port24: port@24 {
+                       reg = <24>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 19>;
+                       phy-handle = <&phy24>;
+                       phy-mode = "qsgmii";
+               };
+               port25: port@25 {
+                       reg = <25>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 19>;
+                       phy-handle = <&phy25>;
+                       phy-mode = "qsgmii";
+               };
+               port26: port@26 {
+                       reg = <26>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 19>;
+                       phy-handle = <&phy26>;
+                       phy-mode = "qsgmii";
+               };
+               port27: port@27 {
+                       reg = <27>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 19>;
+                       phy-handle = <&phy27>;
+                       phy-mode = "qsgmii";
+               };
+               port28: port@28 {
+                       reg = <28>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 20>;
+                       phy-handle = <&phy28>;
+                       phy-mode = "qsgmii";
+               };
+               port29: port@29 {
+                       reg = <29>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 20>;
+                       phy-handle = <&phy29>;
+                       phy-mode = "qsgmii";
+               };
+               port30: port@30 {
+                       reg = <30>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 20>;
+                       phy-handle = <&phy30>;
+                       phy-mode = "qsgmii";
+               };
+               port31: port@31 {
+                       reg = <31>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 20>;
+                       phy-handle = <&phy31>;
+                       phy-mode = "qsgmii";
+               };
+               port32: port@32 {
+                       reg = <32>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 21>;
+                       phy-handle = <&phy32>;
+                       phy-mode = "qsgmii";
+               };
+               port33: port@33 {
+                       reg = <33>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 21>;
+                       phy-handle = <&phy33>;
+                       phy-mode = "qsgmii";
+               };
+               port34: port@34 {
+                       reg = <34>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 21>;
+                       phy-handle = <&phy34>;
+                       phy-mode = "qsgmii";
+               };
+               port35: port@35 {
+                       reg = <35>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 21>;
+                       phy-handle = <&phy35>;
+                       phy-mode = "qsgmii";
+               };
+               port36: port@36 {
+                       reg = <36>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 22>;
+                       phy-handle = <&phy36>;
+                       phy-mode = "qsgmii";
+               };
+               port37: port@37 {
+                       reg = <37>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 22>;
+                       phy-handle = <&phy37>;
+                       phy-mode = "qsgmii";
+               };
+               port38: port@38 {
+                       reg = <38>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 22>;
+                       phy-handle = <&phy38>;
+                       phy-mode = "qsgmii";
+               };
+               port39: port@39 {
+                       reg = <39>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 22>;
+                       phy-handle = <&phy39>;
+                       phy-mode = "qsgmii";
+               };
+               port40: port@40 {
+                       reg = <40>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 23>;
+                       phy-handle = <&phy40>;
+                       phy-mode = "qsgmii";
+               };
+               port41: port@41 {
+                       reg = <41>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 23>;
+                       phy-handle = <&phy41>;
+                       phy-mode = "qsgmii";
+               };
+               port42: port@42 {
+                       reg = <42>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 23>;
+                       phy-handle = <&phy42>;
+                       phy-mode = "qsgmii";
+               };
+               port43: port@43 {
+                       reg = <43>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 23>;
+                       phy-handle = <&phy43>;
+                       phy-mode = "qsgmii";
+               };
+               port44: port@44 {
+                       reg = <44>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 24>;
+                       phy-handle = <&phy44>;
+                       phy-mode = "qsgmii";
+               };
+               port45: port@45 {
+                       reg = <45>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 24>;
+                       phy-handle = <&phy45>;
+                       phy-mode = "qsgmii";
+               };
+               port46: port@46 {
+                       reg = <46>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 24>;
+                       phy-handle = <&phy46>;
+                       phy-mode = "qsgmii";
+               };
+               port47: port@47 {
+                       reg = <47>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 24>;
+                       phy-handle = <&phy47>;
+                       phy-mode = "qsgmii";
+               };
+               /* Then the 25G interfaces */
+               port60: port@60 {
+                       reg = <60>;
+                       microchip,bandwidth = <25000>;
+                       phys = <&serdes 29>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth60>;
+                       managed = "in-band-status";
+               };
+               port61: port@61 {
+                       reg = <61>;
+                       microchip,bandwidth = <25000>;
+                       phys = <&serdes 30>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth61>;
+                       managed = "in-band-status";
+               };
+               port62: port@62 {
+                       reg = <62>;
+                       microchip,bandwidth = <25000>;
+                       phys = <&serdes 31>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth62>;
+                       managed = "in-band-status";
+               };
+               port63: port@63 {
+                       reg = <63>;
+                       microchip,bandwidth = <25000>;
+                       phys = <&serdes 32>;
+                       phy-mode = "10gbase-r";
+                       sfp = <&sfp_eth63>;
+                       managed = "in-band-status";
+               };
+               /* Finally the Management interface */
+               port64: port@64 {
+                       reg = <64>;
+                       microchip,bandwidth = <1000>;
+                       phys = <&serdes 0>;
+                       phy-handle = <&phy64>;
+                       phy-mode = "sgmii";
+               };
+       };
+};
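
The copper ports in this file map four QSGMII ports onto each serdes lane: ports 0-3 sit on serdes 13, ports 4-7 on serdes 14, and so on up to ports 44-47 on serdes 24. A one-line restatement of that observed pattern (illustrative only, not taken from the driver):

    #include <linux/types.h>

    /* Sketch: the lane index implied by the QSGMII port nodes above (ports 0-47) */
    static inline u32 qsgmii_port_to_serdes(u32 port)
    {
            return 13 + port / 4;   /* 0-3 -> 13, 4-7 -> 14, ..., 44-47 -> 24 */
    }
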
index b2bcbf2..ca59d1f 100644 (file)
                };
        };
 
-       dmss: dmss {
+       dmss: bus@48000000 {
                compatible = "simple-mfd";
                #address-cells = <2>;
                #size-cells = <2>;
                dma-ranges;
-               ranges;
+               ranges = <0x00 0x48000000 0x00 0x48000000 0x00 0x06400000>;
 
                ti,sci-dev-id = <25>;
 
                };
        };
 
-       dmsc: dmsc@44043000 {
+       dmsc: system-controller@44043000 {
                compatible = "ti,k2g-sci";
                ti,host-id = <12>;
                mbox-names = "rx", "tx";
                        #power-domain-cells = <2>;
                };
 
-               k3_clks: clocks {
+               k3_clks: clock-controller {
                        compatible = "ti,k2g-sci-clk";
                        #clock-cells = <2>;
                };
                clocks = <&k3_clks 145 0>;
        };
 
-       main_gpio_intr: interrupt-controller0 {
+       main_gpio_intr: interrupt-controller@a00000 {
                compatible = "ti,sci-intr";
+               reg = <0x00 0x00a00000 0x00 0x800>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
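
Two themes repeat through this and the following TI K3 files: nodes move to the generic names the devicetree spec recommends (bus@..., system-controller@..., clock-controller, interrupt-controller@...), and the catch-all "ranges;" is replaced by a bounded mapping. With two address and two size cells, each ranges entry reads <child-addr parent-addr size>, so the dmss entry above identity-maps only 0x48000000-0x4e3fffff (0x06400000 bytes, i.e. 100 MiB) into the bus, and children can no longer claim addresses outside that window. The "ti,sci-intr" interrupt routers likewise gain a reg for their MMIO region, which their old freestanding interrupt-controller0/1/2 names never carried.
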
index 99e94de..deb19ae 100644 (file)
@@ -74,8 +74,9 @@
                clocks = <&k3_clks 148 0>;
        };
 
-       mcu_gpio_intr: interrupt-controller1 {
+       mcu_gpio_intr: interrupt-controller@4210000 {
                compatible = "ti,sci-intr";
+               reg = <0x00 0x04210000 0x00 0x200>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
index cb340d1..6cd3131 100644 (file)
                #phy-cells = <0>;
        };
 
-       intr_main_gpio: interrupt-controller0 {
+       intr_main_gpio: interrupt-controller@a00000 {
                compatible = "ti,sci-intr";
+               reg = <0x0 0x00a00000 0x0 0x400>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
                ti,interrupt-ranges = <0 392 32>;
        };
 
-       main-navss {
+       main_navss: bus@30800000 {
                compatible = "simple-mfd";
                #address-cells = <2>;
                #size-cells = <2>;
-               ranges;
+               ranges = <0x0 0x30800000 0x0 0x30800000 0x0 0xbc00000>;
                dma-coherent;
                dma-ranges;
 
                ti,sci-dev-id = <118>;
 
-               intr_main_navss: interrupt-controller1 {
+               intr_main_navss: interrupt-controller@310e0000 {
                        compatible = "ti,sci-intr";
+                       reg = <0x0 0x310e0000 0x0 0x2000>;
                        ti,intr-trigger-type = <4>;
                        interrupt-controller;
                        interrupt-parent = <&gic500>;
index 0388c02..f5b8ef2 100644 (file)
                };
        };
 
-       mcu-navss {
+       mcu_navss: bus@28380000 {
                compatible = "simple-mfd";
                #address-cells = <2>;
                #size-cells = <2>;
-               ranges;
+               ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>;
                dma-coherent;
                dma-ranges;
 
index ed42f13..7cb864b 100644 (file)
@@ -6,24 +6,24 @@
  */
 
 &cbass_wakeup {
-       dmsc: dmsc {
+       dmsc: system-controller@44083000 {
                compatible = "ti,am654-sci";
                ti,host-id = <12>;
-               #address-cells = <1>;
-               #size-cells = <1>;
-               ranges;
 
                mbox-names = "rx", "tx";
 
                mboxes = <&secure_proxy_main 11>,
                        <&secure_proxy_main 13>;
 
+               reg-names = "debug_messages";
+               reg = <0x44083000 0x1000>;
+
                k3_pds: power-controller {
                        compatible = "ti,sci-pm-domain";
                        #power-domain-cells = <2>;
                };
 
-               k3_clks: clocks {
+               k3_clks: clock-controller {
                        compatible = "ti,k2g-sci-clk";
                        #clock-cells = <2>;
                };
@@ -69,8 +69,9 @@
                power-domains = <&k3_pds 115 TI_SCI_PD_EXCLUSIVE>;
        };
 
-       intr_wkup_gpio: interrupt-controller2 {
+       intr_wkup_gpio: interrupt-controller@42200000 {
                compatible = "ti,sci-intr";
+               reg = <0x42200000 0x200>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
index 9e87fb3..eddb2ff 100644 (file)
                        gpios = <&wkup_gpio0 27 GPIO_ACTIVE_LOW>;
                };
        };
-
-       clk_ov5640_fixed: clock {
-               compatible = "fixed-clock";
-               #clock-cells = <0>;
-               clock-frequency = <24000000>;
-       };
 };
 
 &wkup_pmx0 {
        pinctrl-names = "default";
        pinctrl-0 = <&main_i2c1_pins_default>;
        clock-frequency = <400000>;
-
-       ov5640: camera@3c {
-               compatible = "ovti,ov5640";
-               reg = <0x3c>;
-
-               clocks = <&clk_ov5640_fixed>;
-               clock-names = "xclk";
-
-               port {
-                       csi2_cam0: endpoint {
-                               remote-endpoint = <&csi2_phy0>;
-                               clock-lanes = <0>;
-                               data-lanes = <1 2>;
-                       };
-               };
-       };
-
 };
 
 &main_i2c2 {
        };
 };
 
-&csi2_0 {
-       csi2_phy0: endpoint {
-               remote-endpoint = <&csi2_cam0>;
-               clock-lanes = <0>;
-               data-lanes = <1 2>;
-       };
-};
-
 &mcu_cpsw {
        pinctrl-names = "default";
        pinctrl-0 = <&mcu_cpsw_pins_default &mcu_mdio_pins_default>;
index f86c493..19fea8a 100644 (file)
@@ -68,8 +68,9 @@
                };
        };
 
-       main_gpio_intr: interrupt-controller0 {
+       main_gpio_intr: interrupt-controller@a00000 {
                compatible = "ti,sci-intr";
+               reg = <0x00 0x00a00000 0x00 0x800>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
                #size-cells = <2>;
                ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>;
                ti,sci-dev-id = <199>;
+               dma-coherent;
+               dma-ranges;
 
-               main_navss_intr: interrupt-controller1 {
+               main_navss_intr: interrupt-controller@310e0000 {
                        compatible = "ti,sci-intr";
+                       reg = <0x00 0x310e0000 0x00 0x4000>;
                        ti,intr-trigger-type = <4>;
                        interrupt-controller;
                        interrupt-parent = <&gic500>;
index 5e74e43..5663fe3 100644 (file)
@@ -6,7 +6,7 @@
  */
 
 &cbass_mcu_wakeup {
-       dmsc: dmsc@44083000 {
+       dmsc: system-controller@44083000 {
                compatible = "ti,k2g-sci";
                ti,host-id = <12>;
 
@@ -23,7 +23,7 @@
                        #power-domain-cells = <2>;
                };
 
-               k3_clks: clocks {
+               k3_clks: clock-controller {
                        compatible = "ti,k2g-sci-clk";
                        #clock-cells = <2>;
                };
@@ -96,8 +96,9 @@
                clock-names = "fclk";
        };
 
-       wkup_gpio_intr: interrupt-controller2 {
+       wkup_gpio_intr: interrupt-controller@42200000 {
                compatible = "ti,sci-intr";
+               reg = <0x00 0x42200000 0x00 0x400>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
index c2aa45a..3bcafe4 100644 (file)
@@ -76,8 +76,9 @@
                };
        };
 
-       main_gpio_intr: interrupt-controller0 {
+       main_gpio_intr: interrupt-controller@a00000 {
                compatible = "ti,sci-intr";
+               reg = <0x00 0x00a00000 0x00 0x800>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
                ti,interrupt-ranges = <8 392 56>;
        };
 
-       main-navss {
+       main_navss: bus@30000000 {
                compatible = "simple-mfd";
                #address-cells = <2>;
                #size-cells = <2>;
-               ranges;
+               ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>;
                dma-coherent;
                dma-ranges;
 
                ti,sci-dev-id = <199>;
 
-               main_navss_intr: interrupt-controller1 {
+               main_navss_intr: interrupt-controller@310e0000 {
                        compatible = "ti,sci-intr";
+                       reg = <0x0 0x310e0000 0x0 0x4000>;
                        ti,intr-trigger-type = <4>;
                        interrupt-controller;
                        interrupt-parent = <&gic500>;
index d56e347..5e825e4 100644 (file)
@@ -6,7 +6,7 @@
  */
 
 &cbass_mcu_wakeup {
-       dmsc: dmsc@44083000 {
+       dmsc: system-controller@44083000 {
                compatible = "ti,k2g-sci";
                ti,host-id = <12>;
 
@@ -23,7 +23,7 @@
                        #power-domain-cells = <2>;
                };
 
-               k3_clks: clocks {
+               k3_clks: clock-controller {
                        compatible = "ti,k2g-sci-clk";
                        #clock-cells = <2>;
                };
@@ -96,8 +96,9 @@
                clock-names = "fclk";
        };
 
-       wkup_gpio_intr: interrupt-controller2 {
+       wkup_gpio_intr: interrupt-controller@42200000 {
                compatible = "ti,sci-intr";
+               reg = <0x00 0x42200000 0x00 0x400>;
                ti,intr-trigger-type = <1>;
                interrupt-controller;
                interrupt-parent = <&gic500>;
                };
        };
 
-       mcu-navss {
+       mcu_navss: bus@28380000 {
                compatible = "simple-mfd";
                #address-cells = <2>;
                #size-cells = <2>;
-               ranges;
+               ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>;
                dma-coherent;
                dma-ranges;
 
index 2175ec0..451e11e 100644 (file)
@@ -74,7 +74,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
  * This insanity brought to you by speculative system register reads,
  * out-of-order memory accesses, sequence locks and Thomas Gleixner.
  *
- * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
+ * https://lore.kernel.org/r/alpine.DEB.2.21.1902081950260.1662@nanos.tec.linutronix.de/
  */
 #define arch_counter_enforce_ordering(val) do {                                \
        u64 tmp, _val = (val);                                          \
index cf8df03..5e9b33c 100644 (file)
@@ -63,6 +63,7 @@
 #define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector            18
 #define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize             19
 #define __KVM_HOST_SMCCC_FUNC___pkvm_mark_hyp                  20
+#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc                  21
 
 #ifndef __ASSEMBLY__
 
@@ -201,6 +202,8 @@ extern void __kvm_timer_set_cntvoff(u64 cntvoff);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
+extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);
+
 extern u64 __vgic_v3_get_gic_config(void);
 extern u64 __vgic_v3_read_vmcr(void);
 extern void __vgic_v3_write_vmcr(u32 vmcr);
index f612c09..01b9857 100644 (file)
@@ -463,4 +463,9 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
        vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
 }
 
+static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
+{
+       return test_bit(feature, vcpu->arch.features);
+}
+
 #endif /* __ARM64_KVM_EMULATE_H__ */
index 1cb39c0..e720148 100644 (file)
@@ -720,11 +720,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                        return ret;
        }
 
-       if (run->immediate_exit)
-               return -EINTR;
-
        vcpu_load(vcpu);
 
+       if (run->immediate_exit) {
+               ret = -EINTR;
+               goto out;
+       }
+
        kvm_sigset_activate(vcpu);
 
        ret = 1;
@@ -897,6 +899,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
        kvm_sigset_deactivate(vcpu);
 
+out:
+       /*
+        * In the unlikely event that we are returning to userspace
+        * with pending exceptions or PC adjustment, commit these
+        * adjustments in order to give userspace a consistent view of
+        * the vcpu state. Note that this relies on __kvm_adjust_pc()
+        * being preempt-safe on VHE.
+        */
+       if (unlikely(vcpu->arch.flags & (KVM_ARM64_PENDING_EXCEPTION |
+                                        KVM_ARM64_INCREMENT_PC)))
+               kvm_call_hyp(__kvm_adjust_pc, vcpu);
+
        vcpu_put(vcpu);
        return ret;
 }
index 7362909..11541b9 100644 (file)
@@ -296,7 +296,7 @@ static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
        *vcpu_pc(vcpu) = vect_offset;
 }
 
-void kvm_inject_exception(struct kvm_vcpu *vcpu)
+static void kvm_inject_exception(struct kvm_vcpu *vcpu)
 {
        if (vcpu_el1_is_32bit(vcpu)) {
                switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) {
@@ -329,3 +329,19 @@ void kvm_inject_exception(struct kvm_vcpu *vcpu)
                }
        }
 }
+
+/*
+ * Adjust the guest PC (and potentially exception state) depending on
+ * flags provided by the emulation code.
+ */
+void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
+               kvm_inject_exception(vcpu);
+               vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
+                                     KVM_ARM64_EXCEPT_MASK);
+       } else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
+               kvm_skip_instr(vcpu);
+               vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
+       }
+}
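
With this change, __kvm_adjust_pc() is the single place where a deferred PC update is committed: emulation code only raises a flag (kvm_incr_pc(), earlier in this diff, sets KVM_ARM64_INCREMENT_PC), and the hyp entry paths plus the new out: path in kvm_arch_vcpu_ioctl_run() apply it exactly once. A hypothetical handler-side sketch of the pattern:

    /* Sketch only: an exit handler defers the PC bump instead of writing
     * the PC directly; __kvm_adjust_pc() commits it on the next entry. */
    static int handle_emulated_access(struct kvm_vcpu *vcpu)
    {
            /* ... emulate the access ... */
            kvm_incr_pc(vcpu);      /* sets KVM_ARM64_INCREMENT_PC */
            return 1;               /* resume the guest */
    }
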
index 6171635..4fdfeab 100644 (file)
@@ -13,8 +13,6 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_host.h>
 
-void kvm_inject_exception(struct kvm_vcpu *vcpu);
-
 static inline void kvm_skip_instr(struct kvm_vcpu *vcpu)
 {
        if (vcpu_mode_is_32bit(vcpu)) {
@@ -44,22 +42,6 @@ static inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
 }
 
 /*
- * Adjust the guest PC on entry, depending on flags provided by EL1
- * for the purpose of emulation (MMIO, sysreg) or exception injection.
- */
-static inline void __adjust_pc(struct kvm_vcpu *vcpu)
-{
-       if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
-               kvm_inject_exception(vcpu);
-               vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
-                                     KVM_ARM64_EXCEPT_MASK);
-       } else  if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
-               kvm_skip_instr(vcpu);
-               vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
-       }
-}
-
-/*
  * Skip an instruction while host sysregs are live.
  * Assumes host is always 64-bit.
  */
index f36420a..1632f00 100644 (file)
@@ -28,6 +28,13 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
        cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
 }
 
+static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
+{
+       DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
+
+       __kvm_adjust_pc(kern_hyp_va(vcpu));
+}
+
 static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
 {
        __kvm_flush_vm_context();
@@ -170,6 +177,7 @@ typedef void (*hcall_t)(struct kvm_cpu_context *);
 
 static const hcall_t host_hcall[] = {
        HANDLE_FUNC(__kvm_vcpu_run),
+       HANDLE_FUNC(__kvm_adjust_pc),
        HANDLE_FUNC(__kvm_flush_vm_context),
        HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
        HANDLE_FUNC(__kvm_tlb_flush_vmid),
index e342f7f..4b60c00 100644 (file)
@@ -23,8 +23,8 @@
 extern unsigned long hyp_nr_cpus;
 struct host_kvm host_kvm;
 
-struct hyp_pool host_s2_mem;
-struct hyp_pool host_s2_dev;
+static struct hyp_pool host_s2_mem;
+static struct hyp_pool host_s2_dev;
 
 /*
  * Copies of the host's CPU features registers holding sanitized values.
index 7488f53..a3d3a27 100644 (file)
@@ -17,7 +17,6 @@
 #include <nvhe/trap_handler.h>
 
 struct hyp_pool hpool;
-struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
 unsigned long hyp_nr_cpus;
 
 #define hyp_percpu_size ((unsigned long)__per_cpu_end - \
@@ -27,6 +26,7 @@ static void *vmemmap_base;
 static void *hyp_pgt_base;
 static void *host_s2_mem_pgt_base;
 static void *host_s2_dev_pgt_base;
+static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
 
 static int divide_memory_pool(void *virt, unsigned long size)
 {
index e9f6ea7..f7af968 100644 (file)
@@ -4,7 +4,6 @@
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  */
 
-#include <hyp/adjust_pc.h>
 #include <hyp/switch.h>
 #include <hyp/sysreg-sr.h>
 
@@ -201,7 +200,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
         */
        __debug_save_host_buffers_nvhe(vcpu);
 
-       __adjust_pc(vcpu);
+       __kvm_adjust_pc(vcpu);
 
        /*
         * We must restore the 32-bit state before the sysregs, thanks
index 7b8f7db..b322992 100644 (file)
@@ -4,7 +4,6 @@
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  */
 
-#include <hyp/adjust_pc.h>
 #include <hyp/switch.h>
 
 #include <linux/arm-smccc.h>
@@ -132,7 +131,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
        __load_guest_stage2(vcpu->arch.hw_mmu);
        __activate_traps(vcpu);
 
-       __adjust_pc(vcpu);
+       __kvm_adjust_pc(vcpu);
 
        sysreg_restore_guest_state_vhe(guest_ctxt);
        __debug_switch_to_guest(vcpu);
index c5d1f3c..c10207f 100644 (file)
@@ -1156,13 +1156,13 @@ out_unlock:
 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 {
        if (!kvm->arch.mmu.pgt)
-               return 0;
+               return false;
 
        __unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT,
                             (range->end - range->start) << PAGE_SHIFT,
                             range->may_block);
 
-       return 0;
+       return false;
 }
 
 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
@@ -1170,7 +1170,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
        kvm_pfn_t pfn = pte_pfn(range->pte);
 
        if (!kvm->arch.mmu.pgt)
-               return 0;
+               return false;
 
        WARN_ON(range->end - range->start != 1);
 
@@ -1190,7 +1190,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
                               PAGE_SIZE, __pfn_to_phys(pfn),
                               KVM_PGTABLE_PROT_R, NULL);
 
-       return 0;
+       return false;
 }
 
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
@@ -1200,7 +1200,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
        pte_t pte;
 
        if (!kvm->arch.mmu.pgt)
-               return 0;
+               return false;
 
        WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
 
@@ -1213,7 +1213,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
        if (!kvm->arch.mmu.pgt)
-               return 0;
+               return false;
 
        return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
                                           range->start << PAGE_SHIFT);
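
The four hunks above convert KVM/arm64's gfn-range MMU-notifier hooks to return bool literals instead of 0. As I read the generic code, the return value tells common KVM whether it still needs to flush TLBs after the walk; the stage-2 unmap path flushes internally, so false is the honest answer, and spelling it as a literal makes that contract visible. A minimal sketch of the shape such a handler takes (the function name is illustrative, not from this patch):

    /* Sketch: gfn-range notifier hook; returning true would ask the
     * generic KVM code to flush TLBs on our behalf. */
    static bool demo_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
    {
            if (!kvm->arch.mmu.pgt)
                    return false;           /* nothing mapped yet */
            /* ...unmap the range, which flushes internally... */
            return false;
    }
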
index 956cdc2..d37ebee 100644 (file)
@@ -166,6 +166,25 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
+{
+       struct kvm_vcpu *tmp;
+       bool is32bit;
+       int i;
+
+       is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
+       if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
+               return false;
+
+       /* Check that the vcpus are either all 32bit or all 64bit */
+       kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+               if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
+                       return false;
+       }
+
+       return true;
+}
+
 /**
  * kvm_reset_vcpu - sets core registers and sys_regs to reset value
  * @vcpu: The VCPU pointer
@@ -217,13 +236,14 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
                }
        }
 
+       if (!vcpu_allowed_register_width(vcpu)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        switch (vcpu->arch.target) {
        default:
                if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
-                       if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
-                               ret = -EINVAL;
-                               goto out;
-                       }
                        pstate = VCPU_RESET_PSTATE_SVC;
                } else {
                        pstate = VCPU_RESET_PSTATE_EL1;
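
The new vcpu_allowed_register_width() helper, together with the kvm_reset_vcpu() hunk, moves the 32-bit-EL1 capability check out of the switch and additionally rejects VMs that mix 32-bit and 64-bit vCPUs. A hypothetical userspace sequence that would now fail at the second init call (the fds and the target value are stand-ins):

    /* Sketch: mixing EL1 register widths across vCPUs yields -EINVAL. */
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    struct kvm_vcpu_init init = { .target = KVM_ARM_TARGET_GENERIC_V8 };

    ioctl(vcpu0_fd, KVM_ARM_VCPU_INIT, &init);          /* 64-bit EL1: ok */
    init.features[0] |= 1u << KVM_ARM_VCPU_EL1_32BIT;
    ioctl(vcpu1_fd, KVM_ARM_VCPU_INIT, &init);          /* now rejected   */
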
index 76ea280..1a7968a 100644 (file)
@@ -399,14 +399,14 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
 {
-       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
 
        if (p->is_write)
                reg_to_dbg(vcpu, p, rd, dbg_reg);
        else
                dbg_to_reg(vcpu, p, rd, dbg_reg);
 
-       trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+       trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
 
        return true;
 }
@@ -414,7 +414,7 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
 static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                const struct kvm_one_reg *reg, void __user *uaddr)
 {
-       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
 
        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
@@ -424,7 +424,7 @@ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
        const struct kvm_one_reg *reg, void __user *uaddr)
 {
-       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
 
        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
@@ -434,21 +434,21 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static void reset_bvr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
 {
-       vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
+       vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
 }
 
 static bool trap_bcr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
 {
-       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
 
        if (p->is_write)
                reg_to_dbg(vcpu, p, rd, dbg_reg);
        else
                dbg_to_reg(vcpu, p, rd, dbg_reg);
 
-       trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+       trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
 
        return true;
 }
@@ -456,7 +456,7 @@ static bool trap_bcr(struct kvm_vcpu *vcpu,
 static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                const struct kvm_one_reg *reg, void __user *uaddr)
 {
-       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
 
        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
@@ -467,7 +467,7 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
        const struct kvm_one_reg *reg, void __user *uaddr)
 {
-       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
 
        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
@@ -477,22 +477,22 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static void reset_bcr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
 {
-       vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
+       vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
 }
 
 static bool trap_wvr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
 {
-       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
 
        if (p->is_write)
                reg_to_dbg(vcpu, p, rd, dbg_reg);
        else
                dbg_to_reg(vcpu, p, rd, dbg_reg);
 
-       trace_trap_reg(__func__, rd->reg, p->is_write,
-               vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
+       trace_trap_reg(__func__, rd->CRm, p->is_write,
+               vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);
 
        return true;
 }
@@ -500,7 +500,7 @@ static bool trap_wvr(struct kvm_vcpu *vcpu,
 static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                const struct kvm_one_reg *reg, void __user *uaddr)
 {
-       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
 
        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
@@ -510,7 +510,7 @@ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
        const struct kvm_one_reg *reg, void __user *uaddr)
 {
-       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
 
        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
@@ -520,21 +520,21 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static void reset_wvr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
 {
-       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
+       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
 }
 
 static bool trap_wcr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
 {
-       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
 
        if (p->is_write)
                reg_to_dbg(vcpu, p, rd, dbg_reg);
        else
                dbg_to_reg(vcpu, p, rd, dbg_reg);
 
-       trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+       trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
 
        return true;
 }
@@ -542,7 +542,7 @@ static bool trap_wcr(struct kvm_vcpu *vcpu,
 static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                const struct kvm_one_reg *reg, void __user *uaddr)
 {
-       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
 
        if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
@@ -552,7 +552,7 @@ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
        const struct kvm_one_reg *reg, void __user *uaddr)
 {
-       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
 
        if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
                return -EFAULT;
@@ -562,7 +562,7 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static void reset_wcr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
 {
-       vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
+       vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
 }
 
 static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
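
Every accessor in this file's hunk switches its array index from rd->reg to rd->CRm. In the architectural encoding of the AArch64 hardware debug registers the breakpoint/watchpoint number n is carried in the CRm field (DBGBVR<n>_EL1, for instance, is op0=2, op1=0, CRn=0, CRm=n, op2=4), while the descriptor's reg field does not reliably hold that number for these entries, so CRm is the safe index:

    /* Sketch: the debug register number n comes from the CRm field of
     * the encoding, so CRm indexes the per-vCPU debug state arrays. */
    u64 *bvr = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
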
index 6dd9369..89b66ef 100644 (file)
@@ -515,7 +515,8 @@ static void __init map_mem(pgd_t *pgdp)
         */
        BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
 
-       if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
+       if (rodata_full || crash_mem_map || debug_pagealloc_enabled() ||
+           IS_ENABLED(CONFIG_KFENCE))
                flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
        /*
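
Adding IS_ENABLED(CONFIG_KFENCE) to this condition keeps the arm64 linear map PTE-granular whenever KFENCE is built in. KFENCE flips the protections of individual guard pages at runtime, which, as with debug_pagealloc, only works if no block or contiguous mappings would have to be split first; hence the same flags are forced:

    /* Sketch: features that change permissions on single linear-map pages
     * at runtime need page-granular (non-block) mappings from the start. */
    if (rodata_full || crash_mem_map || debug_pagealloc_enabled() ||
        IS_ENABLED(CONFIG_KFENCE))
            flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
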
index b184baa..f175bce 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/reboot.h>
 #include <asm/setup.h>
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
 #include <prom.h>
 
 const char *get_system_type(void)
index 569e814..5747f17 100644 (file)
                        ranges = <0x01000000 0x0 0x00000000 0x0 0x18000000  0x0 0x00010000>,
                                 <0x02000000 0x0 0x40000000 0x0 0x40000000  0x0 0x40000000>;
 
+                       gmac@3,0 {
+                               compatible = "pci0014,7a03.0",
+                                                  "pci0014,7a03",
+                                                  "pciclass0c0320",
+                                                  "pciclass0c03",
+                                                  "loongson, pci-gmac";
+
+                               reg = <0x1800 0x0 0x0 0x0 0x0>;
+                               interrupts = <12 IRQ_TYPE_LEVEL_LOW>,
+                                            <13 IRQ_TYPE_LEVEL_LOW>;
+                               interrupt-names = "macirq", "eth_lpi";
+                               interrupt-parent = <&liointc0>;
+                               phy-mode = "rgmii";
+                               mdio {
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
+                                       compatible = "snps,dwmac-mdio";
+                                       phy0: ethernet-phy@0 {
+                                               reg = <0>;
+                                       };
+                               };
+                       };
+
+                       gmac@3,1 {
+                               compatible = "pci0014,7a03.0",
+                                                  "pci0014,7a03",
+                                                  "pciclass0c0320",
+                                                  "pciclass0c03",
+                                                  "loongson, pci-gmac";
+
+                               reg = <0x1900 0x0 0x0 0x0 0x0>;
+                               interrupts = <14 IRQ_TYPE_LEVEL_LOW>,
+                                            <15 IRQ_TYPE_LEVEL_LOW>;
+                               interrupt-names = "macirq", "eth_lpi";
+                               interrupt-parent = <&liointc0>;
+                               phy-mode = "rgmii";
+                               mdio {
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
+                                       compatible = "snps,dwmac-mdio";
+                                       phy1: ethernet-phy@1 {
+                                               reg = <0>;
+                                       };
+                               };
+                       };
+
                        ehci@4,1 {
                                compatible = "pci0014,7a14.0",
                                                   "pci0014,7a14",
index f99a7a1..58b9bb4 100644 (file)
                                compatible = "pci0014,7a03.0",
                                                   "pci0014,7a03",
                                                   "pciclass020000",
-                                                  "pciclass0200";
+                                                  "pciclass0200",
+                                                  "loongson, pci-gmac";
 
                                reg = <0x1800 0x0 0x0 0x0 0x0>;
                                interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
                                compatible = "pci0014,7a03.0",
                                                   "pci0014,7a03",
                                                   "pciclass020000",
-                                                  "pciclass0200";
+                                                  "pciclass0200",
+                                                  "loongson, pci-gmac";
 
                                reg = <0x1900 0x0 0x0 0x0 0x0>;
                                interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
index f93aa5e..3481ed4 100644 (file)
@@ -3,6 +3,9 @@
  *
  */
 
+#ifndef _ASM_MIPS_BOARDS_LAUNCH_H
+#define _ASM_MIPS_BOARDS_LAUNCH_H
+
 #ifndef _ASSEMBLER_
 
 struct cpulaunch {
@@ -34,3 +37,5 @@ struct cpulaunch {
 
 /* Polling period in count cycles for secondary CPU's */
 #define LAUNCHPERIOD   10000
+
+#endif /* _ASM_MIPS_BOARDS_LAUNCH_H */
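
The launch.h hunk adds the include guard this header was missing, so pulling it into a translation unit twice no longer redeclares struct cpulaunch. For reference, the pattern it adopts:

    /* Classic include guard: a second inclusion expands to nothing
     * because the macro is already defined. */
    #ifndef _ASM_MIPS_BOARDS_LAUNCH_H
    #define _ASM_MIPS_BOARDS_LAUNCH_H

    /* ...declarations... */

    #endif /* _ASM_MIPS_BOARDS_LAUNCH_H */
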
index 2d94996..cdf404a 100644 (file)
 #define SO_PREFER_BUSY_POLL    69
 #define SO_BUSY_POLL_BUDGET    70
 
+#define SO_NETNS_COOKIE                71
+
 #if !defined(__KERNEL__)
 
 #if __BITS_PER_LONG == 64
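
This hunk (and the matching parisc and sparc hunks further down, each with its own numbering) wires up SO_NETNS_COOKIE, a read-only socket option reporting the cookie of the network namespace that owns the socket. A sketch of how userspace would query it; on kernels without the option, getsockopt() should fail with ENOPROTOOPT:

    /* Sketch: read a socket's network-namespace cookie. */
    #include <stdio.h>
    #include <stdint.h>
    #include <sys/socket.h>

    #ifndef SO_NETNS_COOKIE
    #define SO_NETNS_COOKIE 71      /* asm-generic value; arch values differ */
    #endif

    static int print_netns_cookie(int fd)
    {
            uint64_t cookie;
            socklen_t len = sizeof(cookie);

            if (getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE, &cookie, &len))
                    return -1;      /* e.g. ENOPROTOOPT on older kernels */
            printf("netns cookie: %llu\n", (unsigned long long)cookie);
            return 0;
    }
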
index de03838..a9b72ea 100644 (file)
@@ -37,7 +37,7 @@
  */
 notrace void arch_local_irq_disable(void)
 {
-       preempt_disable();
+       preempt_disable_notrace();
 
        __asm__ __volatile__(
        "       .set    push                                            \n"
@@ -53,7 +53,7 @@ notrace void arch_local_irq_disable(void)
        : /* no inputs */
        : "memory");
 
-       preempt_enable();
+       preempt_enable_notrace();
 }
 EXPORT_SYMBOL(arch_local_irq_disable);
 
@@ -61,7 +61,7 @@ notrace unsigned long arch_local_irq_save(void)
 {
        unsigned long flags;
 
-       preempt_disable();
+       preempt_disable_notrace();
 
        __asm__ __volatile__(
        "       .set    push                                            \n"
@@ -78,7 +78,7 @@ notrace unsigned long arch_local_irq_save(void)
        : /* no inputs */
        : "memory");
 
-       preempt_enable();
+       preempt_enable_notrace();
 
        return flags;
 }
@@ -88,7 +88,7 @@ notrace void arch_local_irq_restore(unsigned long flags)
 {
        unsigned long __tmp1;
 
-       preempt_disable();
+       preempt_disable_notrace();
 
        __asm__ __volatile__(
        "       .set    push                                            \n"
@@ -106,7 +106,7 @@ notrace void arch_local_irq_restore(unsigned long flags)
        : "0" (flags)
        : "memory");
 
-       preempt_enable();
+       preempt_enable_notrace();
 }
 EXPORT_SYMBOL(arch_local_irq_restore);
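
These MIPS soft-IRQ helpers are themselves marked notrace because the function tracer calls into them; the plain preempt_disable()/preempt_enable() pair is instrumentable, so using it here can recurse back into tracing (presumably the hang this change fixes). The _notrace variants break that cycle:

    /* Sketch: code reachable from the tracer must use the _notrace
     * preemption helpers so the tracer never re-enters itself. */
    notrace void traced_path_helper(void)
    {
            preempt_disable_notrace();
            /* ...short non-traceable critical section... */
            preempt_enable_notrace();
    }
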
 
index a7bf0c8..830ab91 100644 (file)
@@ -158,31 +158,29 @@ unsigned long _page_cachable_default;
 EXPORT_SYMBOL(_page_cachable_default);
 
 #define PM(p)  __pgprot(_page_cachable_default | (p))
-#define PVA(p) PM(_PAGE_VALID | _PAGE_ACCESSED | (p))
 
 static inline void setup_protection_map(void)
 {
        protection_map[0]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-       protection_map[1]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-       protection_map[2]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-       protection_map[3]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-       protection_map[4]  = PVA(_PAGE_PRESENT);
-       protection_map[5]  = PVA(_PAGE_PRESENT);
-       protection_map[6]  = PVA(_PAGE_PRESENT);
-       protection_map[7]  = PVA(_PAGE_PRESENT);
+       protection_map[1]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+       protection_map[2]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+       protection_map[3]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+       protection_map[4]  = PM(_PAGE_PRESENT);
+       protection_map[5]  = PM(_PAGE_PRESENT);
+       protection_map[6]  = PM(_PAGE_PRESENT);
+       protection_map[7]  = PM(_PAGE_PRESENT);
 
        protection_map[8]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-       protection_map[9]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-       protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
+       protection_map[9]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+       protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
                                _PAGE_NO_READ);
-       protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
-       protection_map[12] = PVA(_PAGE_PRESENT);
-       protection_map[13] = PVA(_PAGE_PRESENT);
-       protection_map[14] = PVA(_PAGE_PRESENT);
-       protection_map[15] = PVA(_PAGE_PRESENT);
+       protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+       protection_map[12] = PM(_PAGE_PRESENT);
+       protection_map[13] = PM(_PAGE_PRESENT);
+       protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
+       protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
 }
 
-#undef _PVA
 #undef PM
 
 void cpu_cache_init(void)
index 0c5de07..0135376 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/io.h>
 #include <linux/clk.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/sizes.h>
 #include <linux/of_fdt.h>
@@ -25,6 +26,7 @@
 
 __iomem void *rt_sysc_membase;
 __iomem void *rt_memc_membase;
+EXPORT_SYMBOL_GPL(rt_sysc_membase);
 
 __iomem void *plat_of_remap_node(const char *node)
 {
index f609043..5b5351c 100644 (file)
 #define SO_PREFER_BUSY_POLL    0x4043
 #define SO_BUSY_POLL_BUDGET    0x4044
 
+#define SO_NETNS_COOKIE                0x4045
+
 #if !defined(__KERNEL__)
 
 #if __BITS_PER_LONG == 64
index c2717f3..ccda0a9 100644 (file)
        };
 
 /include/ "pq3-i2c-0.dtsi"
+       i2c@3000 {
+               fsl,i2c-erratum-a004447;
+       };
+
 /include/ "pq3-i2c-1.dtsi"
+       i2c@3100 {
+               fsl,i2c-erratum-a004447;
+       };
+
 /include/ "pq3-duart-0.dtsi"
 /include/ "pq3-espi-0.dtsi"
        spi0: spi@7000 {
index 872e448..ddc018d 100644 (file)
        };
 
 /include/ "qoriq-i2c-0.dtsi"
+       i2c@118000 {
+               fsl,i2c-erratum-a004447;
+       };
+
+       i2c@118100 {
+               fsl,i2c-erratum-a004447;
+       };
+
 /include/ "qoriq-i2c-1.dtsi"
+       i2c@119000 {
+               fsl,i2c-erratum-a004447;
+       };
+
+       i2c@119100 {
+               fsl,i2c-erratum-a004447;
+       };
+
 /include/ "qoriq-duart-0.dtsi"
 /include/ "qoriq-duart-1.dtsi"
 /include/ "qoriq-gpio-0.dtsi"
index 1e83359..7f2e90d 100644 (file)
@@ -51,6 +51,7 @@
 /* PPC-specific vcpu->requests bit members */
 #define KVM_REQ_WATCHDOG       KVM_ARCH_REQ(0)
 #define KVM_REQ_EPR_EXIT       KVM_ARCH_REQ(1)
+#define KVM_REQ_PENDING_TIMER  KVM_ARCH_REQ(2)
 
 #include <linux/mmu_notifier.h>
 
index 33fa5dd..714a35f 100644 (file)
@@ -31,6 +31,35 @@ static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
        pgd_t *pgdir = init_mm.pgd;
        return __find_linux_pte(pgdir, ea, NULL, hshift);
 }
+
+/*
+ * Convert a kernel vmap virtual address (vmalloc or ioremap space) to a
+ * physical address, without taking locks. This can be used in real mode.
+ */
+static inline phys_addr_t ppc_find_vmap_phys(unsigned long addr)
+{
+       pte_t *ptep;
+       phys_addr_t pa;
+       int hugepage_shift;
+
+       /*
+        * init_mm does not free page tables, and does not do THP. It may
+        * have huge pages from huge vmalloc / ioremap etc.
+        */
+       ptep = find_init_mm_pte(addr, &hugepage_shift);
+       if (WARN_ON(!ptep))
+               return 0;
+
+       pa = PFN_PHYS(pte_pfn(*ptep));
+
+       if (!hugepage_shift)
+               hugepage_shift = PAGE_SHIFT;
+
+       pa |= addr & ((1ul << hugepage_shift) - 1);
+
+       return pa;
+}
+
 /*
  * This is what we should always use. Any other lockless page table lookup needs
  * careful audit against THP split.
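
The new ppc_find_vmap_phys() centralizes a lockless vmap-to-physical walk that the next three hunks (EEH, indirect MMIO, and the KVM real-mode code) had each open-coded; it returns 0 when no PTE is found and leaves the caller to decide how to degrade. The KVM caller below reduces to a one-liner:

    /* Sketch: translate a vmalloc'd pointer back to a linear-map
     * address, as real_vmalloc_addr() below now does. */
    static void *demo_real_vmalloc_addr(void *addr)
    {
            return __va(ppc_find_vmap_phys((unsigned long)addr));
    }
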
index f24cd53..3bbdcc8 100644 (file)
@@ -346,28 +346,7 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
  */
 static inline unsigned long eeh_token_to_phys(unsigned long token)
 {
-       pte_t *ptep;
-       unsigned long pa;
-       int hugepage_shift;
-
-       /*
-        * We won't find hugepages here(this is iomem). Hence we are not
-        * worried about _PAGE_SPLITTING/collapse. Also we will not hit
-        * page table free, because of init_mm.
-        */
-       ptep = find_init_mm_pte(token, &hugepage_shift);
-       if (!ptep)
-               return token;
-
-       pa = pte_pfn(*ptep);
-
-       /* On radix we can do hugepage mappings for io, so handle that */
-       if (!hugepage_shift)
-               hugepage_shift = PAGE_SHIFT;
-
-       pa <<= PAGE_SHIFT;
-       pa |= token & ((1ul << hugepage_shift) - 1);
-       return pa;
+       return ppc_find_vmap_phys(token);
 }
 
 /*
index 51bbaae..c877f07 100644 (file)
@@ -55,7 +55,6 @@ static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
 #ifdef CONFIG_PPC_INDIRECT_MMIO
 struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
 {
-       unsigned hugepage_shift;
        struct iowa_bus *bus;
        int token;
 
@@ -65,22 +64,13 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
                bus = &iowa_busses[token - 1];
        else {
                unsigned long vaddr, paddr;
-               pte_t *ptep;
 
                vaddr = (unsigned long)PCI_FIX_ADDR(addr);
                if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
                        return NULL;
-               /*
-                * We won't find huge pages here (iomem). Also can't hit
-                * a page table free due to init_mm
-                */
-               ptep = find_init_mm_pte(vaddr, &hugepage_shift);
-               if (ptep == NULL)
-                       paddr = 0;
-               else {
-                       WARN_ON(hugepage_shift);
-                       paddr = pte_pfn(*ptep) << PAGE_SHIFT;
-               }
+
+               paddr = ppc_find_vmap_phys(vaddr);
+
                bus = iowa_pci_find(vaddr, paddr);
 
                if (bus == NULL)
index 57d6b85..2af89a5 100644 (file)
@@ -898,7 +898,6 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
        unsigned int order;
        unsigned int nio_pages, io_order;
        struct page *page;
-       size_t size_io = size;
 
        size = PAGE_ALIGN(size);
        order = get_order(size);
@@ -925,9 +924,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
        memset(ret, 0, size);
 
        /* Set up tces to cover the allocated range */
-       size_io = IOMMU_PAGE_ALIGN(size_io, tbl);
-       nio_pages = size_io >> tbl->it_page_shift;
-       io_order = get_iommu_order(size_io, tbl);
+       nio_pages = size >> tbl->it_page_shift;
+       io_order = get_iommu_order(size, tbl);
        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                              mask >> tbl->it_page_shift, io_order, 0);
        if (mapping == DMA_MAPPING_ERROR) {
@@ -942,9 +940,10 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
 {
        if (tbl) {
-               size_t size_io = IOMMU_PAGE_ALIGN(size, tbl);
-               unsigned int nio_pages = size_io >> tbl->it_page_shift;
+               unsigned int nio_pages;
 
+               size = PAGE_ALIGN(size);
+               nio_pages = size >> tbl->it_page_shift;
                iommu_free(tbl, dma_handle, nio_pages);
                size = PAGE_ALIGN(size);
                free_pages((unsigned long)vaddr, get_order(size));
index 01ab216..e8c2a63 100644 (file)
@@ -108,7 +108,6 @@ int arch_prepare_kprobe(struct kprobe *p)
        int ret = 0;
        struct kprobe *prev;
        struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr);
-       struct ppc_inst prefix = ppc_inst_read((struct ppc_inst *)(p->addr - 1));
 
        if ((unsigned long)p->addr & 0x03) {
                printk("Attempt to register kprobe at an unaligned address\n");
@@ -116,7 +115,8 @@ int arch_prepare_kprobe(struct kprobe *p)
        } else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
                printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
                ret = -EINVAL;
-       } else if (ppc_inst_prefixed(prefix)) {
+       } else if ((unsigned long)p->addr & ~PAGE_MASK &&
+                  ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)(p->addr - 1)))) {
                printk("Cannot register a kprobe on the second word of prefixed instruction\n");
                ret = -EINVAL;
        }
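
The reworked test reads the word before p->addr only when p->addr is not page-aligned: dereferencing p->addr - 1 unconditionally, as the deleted line did, can fault when the probe sits on the first word of a mapping, and since (as I understand the ISA 3.1 rules) a prefixed instruction cannot straddle a page boundary, a page-aligned address can never be its suffix. Condensed:

    /* Sketch: only peek at the previous word when it shares the page;
     * a page-aligned address cannot be the suffix of a prefixed insn. */
    if ((unsigned long)p->addr & ~PAGE_MASK &&
        ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)(p->addr - 1))))
            ret = -EINVAL;  /* probe on 2nd word of a prefixed insn */
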
index 28a80d2..bc08136 100644 (file)
@@ -3936,7 +3936,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
                                break;
                        }
                        cur = ktime_get();
-               } while (single_task_running() && ktime_before(cur, stop));
+               } while (kvm_vcpu_can_poll(cur, stop));
 
                spin_lock(&vc->lock);
                vc->vcore_state = VCORE_INACTIVE;
@@ -4455,7 +4455,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
                mtspr(SPRN_EBBRR, ebb_regs[1]);
                mtspr(SPRN_BESCR, ebb_regs[2]);
                mtspr(SPRN_TAR, user_tar);
-               mtspr(SPRN_FSCR, current->thread.fscr);
        }
        mtspr(SPRN_VRSAVE, user_vrsave);
 
index 7af7c70..7a0f124 100644 (file)
 #include <asm/pte-walk.h>
 
 /* Translate address of a vmalloc'd thing to a linear map address */
-static void *real_vmalloc_addr(void *x)
+static void *real_vmalloc_addr(void *addr)
 {
-       unsigned long addr = (unsigned long) x;
-       pte_t *p;
-       /*
-        * assume we don't have huge pages in vmalloc space...
-        * So don't worry about THP collapse/split. Called
-        * Only in realmode with MSR_EE = 0, hence won't need irq_save/restore.
-        */
-       p = find_init_mm_pte(addr, NULL);
-       if (!p || !pte_present(*p))
-               return NULL;
-       addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
-       return __va(addr);
+       return __va(ppc_find_vmap_phys((unsigned long)addr));
 }
 
 /* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
index 5e634db..004f0d4 100644 (file)
@@ -59,6 +59,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 #define STACK_SLOT_UAMOR       (SFS-88)
 #define STACK_SLOT_DAWR1       (SFS-96)
 #define STACK_SLOT_DAWRX1      (SFS-104)
+#define STACK_SLOT_FSCR                (SFS-112)
 /* the following is used by the P9 short path */
 #define STACK_SLOT_NVGPRS      (SFS-152)       /* 18 gprs */
 
@@ -686,6 +687,8 @@ BEGIN_FTR_SECTION
        std     r6, STACK_SLOT_DAWR0(r1)
        std     r7, STACK_SLOT_DAWRX0(r1)
        std     r8, STACK_SLOT_IAMR(r1)
+       mfspr   r5, SPRN_FSCR
+       std     r5, STACK_SLOT_FSCR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 BEGIN_FTR_SECTION
        mfspr   r6, SPRN_DAWR1
@@ -1663,6 +1666,10 @@ FTR_SECTION_ELSE
        ld      r7, STACK_SLOT_HFSCR(r1)
        mtspr   SPRN_HFSCR, r7
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+       ld      r5, STACK_SLOT_FSCR(r1)
+       mtspr   SPRN_FSCR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        /*
         * Restore various registers to 0, where non-zero values
         * set by the guest could disrupt the host.
index a8ad8eb..18ec0f9 100644 (file)
@@ -34,6 +34,7 @@ config RISCV
        select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
        select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
        select ARCH_SUPPORTS_HUGETLBFS if MMU
+       select ARCH_USE_MEMTEST
        select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
        select ARCH_WANT_FRAME_POINTERS
        select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
@@ -60,11 +61,11 @@ config RISCV
        select GENERIC_TIME_VSYSCALL if MMU && 64BIT
        select HANDLE_DOMAIN_IRQ
        select HAVE_ARCH_AUDITSYSCALL
-       select HAVE_ARCH_JUMP_LABEL
-       select HAVE_ARCH_JUMP_LABEL_RELATIVE
+       select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
+       select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
        select HAVE_ARCH_KASAN if MMU && 64BIT
        select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
-       select HAVE_ARCH_KGDB
+       select HAVE_ARCH_KGDB if !XIP_KERNEL
        select HAVE_ARCH_KGDB_QXFER_PKT
        select HAVE_ARCH_MMAP_RND_BITS if MMU
        select HAVE_ARCH_SECCOMP_FILTER
@@ -79,9 +80,9 @@ config RISCV
        select HAVE_GCC_PLUGINS
        select HAVE_GENERIC_VDSO if MMU && 64BIT
        select HAVE_IRQ_TIME_ACCOUNTING
-       select HAVE_KPROBES
-       select HAVE_KPROBES_ON_FTRACE
-       select HAVE_KRETPROBES
+       select HAVE_KPROBES if !XIP_KERNEL
+       select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL
+       select HAVE_KRETPROBES if !XIP_KERNEL
        select HAVE_PCI
        select HAVE_PERF_EVENTS
        select HAVE_PERF_REGS
@@ -230,11 +231,11 @@ config ARCH_RV64I
        bool "RV64I"
        select 64BIT
        select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
-       select HAVE_DYNAMIC_FTRACE if MMU && $(cc-option,-fpatchable-function-entry=8)
+       select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
        select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
-       select HAVE_FTRACE_MCOUNT_RECORD
+       select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
        select HAVE_FUNCTION_GRAPH_TRACER
-       select HAVE_FUNCTION_TRACER
+       select HAVE_FUNCTION_TRACER if !XIP_KERNEL
        select SWIOTLB if MMU
 
 endchoice
index 3eb9590..4be0206 100644 (file)
@@ -38,6 +38,15 @@ else
        KBUILD_LDFLAGS += -melf32lriscv
 endif
 
+ifeq ($(CONFIG_LD_IS_LLD),y)
+       KBUILD_CFLAGS += -mno-relax
+       KBUILD_AFLAGS += -mno-relax
+ifneq ($(LLVM_IAS),1)
+       KBUILD_CFLAGS += -Wa,-mno-relax
+       KBUILD_AFLAGS += -Wa,-mno-relax
+endif
+endif
+
 # ISA string setting
 riscv-march-$(CONFIG_ARCH_RV32I)       := rv32ima
 riscv-march-$(CONFIG_ARCH_RV64I)       := rv64ima
index 622b127..855c150 100644 (file)
@@ -1,2 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0
 dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += microchip-mpfs-icicle-kit.dtb
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
index 74c47fe..d90e4eb 100644 (file)
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
 dtb-$(CONFIG_SOC_SIFIVE) += hifive-unleashed-a00.dtb \
                            hifive-unmatched-a00.dtb
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
index bdd5fc8..2fde48d 100644 (file)
@@ -1,2 +1,2 @@
-obj-y += errata_cip_453.o
+obj-$(CONFIG_ERRATA_SIFIVE_CIP_453) += errata_cip_453.o
 obj-y += errata.o
index 88c0870..67406c3 100644 (file)
@@ -51,7 +51,7 @@
        REG_ASM " " newlen "\n" \
        ".word " errata_id "\n"
 
-#define ALT_NEW_CONSTENT(vendor_id, errata_id, enable, new_c) \
+#define ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) \
        ".if " __stringify(enable) " == 1\n"                            \
        ".pushsection .alternative, \"a\"\n"                            \
        ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(errata_id), "889f - 888f") \
@@ -69,7 +69,7 @@
        "886 :\n"       \
        old_c "\n"      \
        "887 :\n"       \
-       ALT_NEW_CONSTENT(vendor_id, errata_id, enable, new_c)
+       ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c)
 
 #define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
        __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k))
index 1e95410..e4e291d 100644 (file)
@@ -42,8 +42,8 @@ struct kimage_arch {
        unsigned long fdt_addr;
 };
 
-const extern unsigned char riscv_kexec_relocate[];
-const extern unsigned int riscv_kexec_relocate_size;
+extern const unsigned char riscv_kexec_relocate[];
+extern const unsigned int riscv_kexec_relocate_size;
 
 typedef void (*riscv_kexec_method)(unsigned long first_ind_entry,
                                   unsigned long jump_addr,
index cc04814..9e99e1d 100644 (file)
@@ -14,8 +14,9 @@
 #include <asm/set_memory.h>    /* For set_memory_x() */
 #include <linux/compiler.h>    /* For unreachable() */
 #include <linux/cpu.h>         /* For cpu_down() */
+#include <linux/reboot.h>
 
-/**
+/*
  * kexec_image_info - Print received image details
  */
 static void
@@ -39,7 +40,7 @@ kexec_image_info(const struct kimage *image)
        }
 }
 
-/**
+/*
  * machine_kexec_prepare - Initialize kexec
  *
  * This function is called from do_kexec_load, when the user has
@@ -100,7 +101,7 @@ machine_kexec_prepare(struct kimage *image)
 }
 
 
-/**
+/*
  * machine_kexec_cleanup - Cleanup any leftovers from
  *                        machine_kexec_prepare
  *
@@ -135,7 +136,7 @@ void machine_shutdown(void)
 #endif
 }
 
-/**
+/*
  * machine_crash_shutdown - Prepare to kexec after a kernel crash
  *
  * This function is called by crash_kexec just before machine_kexec
@@ -151,7 +152,7 @@ machine_crash_shutdown(struct pt_regs *regs)
        pr_info("Starting crashdump kernel...\n");
 }
 
-/**
+/*
  * machine_kexec - Jump to the loaded kimage
  *
  * This function is called by kernel_kexec which is called by the
index 10b965c..15cc65a 100644 (file)
@@ -84,6 +84,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
        return 0;
 }
 
+#ifdef CONFIG_MMU
 void *alloc_insn_page(void)
 {
        return  __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
@@ -91,6 +92,7 @@ void *alloc_insn_page(void)
                                     VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
                                     __builtin_return_address(0));
 }
+#endif
 
 /* install breakpoint in text */
 void __kprobes arch_arm_kprobe(struct kprobe *p)
index 03901d3..9a1b7a0 100644 (file)
@@ -231,13 +231,13 @@ static void __init init_resources(void)
 
        /* Clean-up any unused pre-allocated resources */
        mem_res_sz = (num_resources - res_idx + 1) * sizeof(*mem_res);
-       memblock_free((phys_addr_t) mem_res, mem_res_sz);
+       memblock_free(__pa(mem_res), mem_res_sz);
        return;
 
  error:
        /* Better an empty resource tree than an inconsistent one */
        release_child_resources(&iomem_resource);
-       memblock_free((phys_addr_t) mem_res, mem_res_sz);
+       memblock_free(__pa(mem_res), mem_res_sz);
 }
 
 
index 2b3e0cb..bde85fc 100644 (file)
@@ -27,10 +27,10 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
                fp = frame_pointer(regs);
                sp = user_stack_pointer(regs);
                pc = instruction_pointer(regs);
-       } else if (task == NULL || task == current) {
-               fp = (unsigned long)__builtin_frame_address(0);
-               sp = sp_in_global;
-               pc = (unsigned long)walk_stackframe;
+       } else if (task == current) {
+               fp = (unsigned long)__builtin_frame_address(1);
+               sp = (unsigned long)__builtin_frame_address(0);
+               pc = (unsigned long)__builtin_return_address(0);
        } else {
                /* task blocked in __switch_to */
                fp = task->thread.s[0];
@@ -106,15 +106,15 @@ static bool print_trace_address(void *arg, unsigned long pc)
        return true;
 }
 
-void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
+noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
                    const char *loglvl)
 {
-       pr_cont("%sCall Trace:\n", loglvl);
        walk_stackframe(task, regs, print_trace_address, (void *)loglvl);
 }
 
 void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
 {
+       pr_cont("%sCall Trace:\n", loglvl);
        dump_backtrace(NULL, task, loglvl);
 }
 
@@ -139,7 +139,7 @@ unsigned long get_wchan(struct task_struct *task)
 
 #ifdef CONFIG_STACKTRACE
 
-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
                     struct task_struct *task, struct pt_regs *regs)
 {
        walk_stackframe(task, regs, consume_entry, cookie);
index 0721b97..7bc88d8 100644 (file)
@@ -86,8 +86,13 @@ static void do_trap_error(struct pt_regs *regs, int signo, int code,
        }
 }
 
+#if defined (CONFIG_XIP_KERNEL) && defined (CONFIG_RISCV_ERRATA_ALTERNATIVE)
+#define __trap_section         __section(".xip.traps")
+#else
+#define __trap_section
+#endif
 #define DO_ERROR_INFO(name, signo, code, str)                          \
-asmlinkage __visible void name(struct pt_regs *regs)                   \
+asmlinkage __visible __trap_section void name(struct pt_regs *regs)    \
 {                                                                      \
        do_trap_error(regs, signo, code, regs->epc, "Oops - " str);     \
 }
@@ -111,7 +116,7 @@ DO_ERROR_INFO(do_trap_store_misaligned,
 int handle_misaligned_load(struct pt_regs *regs);
 int handle_misaligned_store(struct pt_regs *regs);
 
-asmlinkage void do_trap_load_misaligned(struct pt_regs *regs)
+asmlinkage void __trap_section do_trap_load_misaligned(struct pt_regs *regs)
 {
        if (!handle_misaligned_load(regs))
                return;
@@ -119,7 +124,7 @@ asmlinkage void do_trap_load_misaligned(struct pt_regs *regs)
                      "Oops - load address misaligned");
 }
 
-asmlinkage void do_trap_store_misaligned(struct pt_regs *regs)
+asmlinkage void __trap_section do_trap_store_misaligned(struct pt_regs *regs)
 {
        if (!handle_misaligned_store(regs))
                return;
@@ -146,7 +151,7 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
        return GET_INSN_LENGTH(insn);
 }
 
-asmlinkage __visible void do_trap_break(struct pt_regs *regs)
+asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
 {
 #ifdef CONFIG_KPROBES
        if (kprobe_single_step_handler(regs))
index 4b29b99..a3ff09c 100644 (file)
@@ -99,9 +99,22 @@ SECTIONS
        }
        PERCPU_SECTION(L1_CACHE_BYTES)
 
-       . = ALIGN(PAGE_SIZE);
+       . = ALIGN(8);
+       .alternative : {
+               __alt_start = .;
+               *(.alternative)
+               __alt_end = .;
+       }
        __init_end = .;
 
+       . = ALIGN(16);
+       .xip.traps : {
+               __xip_traps_start = .;
+               *(.xip.traps)
+               __xip_traps_end = .;
+       }
+
+       . = ALIGN(PAGE_SIZE);
        .sdata : {
                __global_pointer$ = . + 0x800;
                *(.sdata*)
index 4faf8bd..4c4c92c 100644 (file)
@@ -746,14 +746,18 @@ void __init protect_kernel_text_data(void)
        unsigned long init_data_start = (unsigned long)__init_data_begin;
        unsigned long rodata_start = (unsigned long)__start_rodata;
        unsigned long data_start = (unsigned long)_data;
-       unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
+#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
+       unsigned long end_va = kernel_virt_addr + load_sz;
+#else
+       unsigned long end_va = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
+#endif
 
        set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
        set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
        set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
        /* rodata section is marked readonly in mark_rodata_ro */
        set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-       set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
+       set_memory_nx(data_start, (end_va - data_start) >> PAGE_SHIFT);
 }
 
 void mark_rodata_ro(void)
index 848a22f..92675dc 100644 (file)
 #define SO_PREFER_BUSY_POLL     0x0048
 #define SO_BUSY_POLL_BUDGET     0x0049
 
+#define SO_NETNS_COOKIE          0x0050
+
 #if !defined(__KERNEL__)
 
 
index 3075294..cb5e8d3 100644 (file)
@@ -200,8 +200,9 @@ endif
 KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
 
 ifdef CONFIG_LTO_CLANG
-KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
-                  -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
+ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0)
+KBUILD_LDFLAGS += -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
+endif
 endif
 
 ifdef CONFIG_X86_NEED_RELOCS
index 63f0972..3a75a2c 100644 (file)
@@ -1406,6 +1406,8 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
                                                die_id = i;
                                        else
                                                die_id = topology_phys_to_logical_pkg(i);
+                                       if (die_id < 0)
+                                               die_id = -ENODEV;
                                        map->pbus_to_dieid[bus] = die_id;
                                        break;
                                }
@@ -1452,14 +1454,14 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
                        i = -1;
                        if (reverse) {
                                for (bus = 255; bus >= 0; bus--) {
-                                       if (map->pbus_to_dieid[bus] >= 0)
+                                       if (map->pbus_to_dieid[bus] != -1)
                                                i = map->pbus_to_dieid[bus];
                                        else
                                                map->pbus_to_dieid[bus] = i;
                                }
                        } else {
                                for (bus = 0; bus <= 255; bus++) {
-                                       if (map->pbus_to_dieid[bus] >= 0)
+                                       if (map->pbus_to_dieid[bus] != -1)
                                                i = map->pbus_to_dieid[bus];
                                        else
                                                map->pbus_to_dieid[bus] = i;
@@ -5097,9 +5099,10 @@ static struct intel_uncore_type icx_uncore_m2m = {
        .perf_ctr       = SNR_M2M_PCI_PMON_CTR0,
        .event_ctl      = SNR_M2M_PCI_PMON_CTL0,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
+       .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
        .box_ctl        = SNR_M2M_PCI_PMON_BOX_CTL,
        .ops            = &snr_m2m_uncore_pci_ops,
-       .format_group   = &skx_uncore_format_group,
+       .format_group   = &snr_m2m_uncore_format_group,
 };
 
 static struct attribute *icx_upi_uncore_formats_attr[] = {
index 412b51e..48067af 100644 (file)
@@ -174,6 +174,7 @@ static inline int apic_is_clustered_box(void)
 extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
 extern void lapic_assign_system_vectors(void);
 extern void lapic_assign_legacy_vector(unsigned int isairq, bool replace);
+extern void lapic_update_legacy_vectors(void);
 extern void lapic_online(void);
 extern void lapic_offline(void);
 extern bool apic_needs_pit(void);
index b7dd944..8f28faf 100644 (file)
 # define DISABLE_PTI           (1 << (X86_FEATURE_PTI & 31))
 #endif
 
-#ifdef CONFIG_IOMMU_SUPPORT
-# define DISABLE_ENQCMD        0
-#else
-# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
-#endif
+/* Force disable because it's broken beyond repair */
+#define DISABLE_ENQCMD         (1 << (X86_FEATURE_ENQCMD & 31))
 
 #ifdef CONFIG_X86_SGX
 # define DISABLE_SGX   0
index ed33a14..23bef08 100644 (file)
@@ -106,10 +106,6 @@ extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
  */
 #define PASID_DISABLED 0
 
-#ifdef CONFIG_IOMMU_SUPPORT
-/* Update current's PASID MSR/state by mm's PASID. */
-void update_pasid(void);
-#else
 static inline void update_pasid(void) { }
-#endif
+
 #endif /* _ASM_X86_FPU_API_H */
index 8d33ad8..ceeba9f 100644 (file)
@@ -584,13 +584,6 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
                        pkru_val = pk->pkru;
        }
        __write_pkru(pkru_val);
-
-       /*
-        * Expensive PASID MSR write will be avoided in update_pasid() because
-        * TIF_NEED_FPU_LOAD was set. And the PASID state won't be updated
-        * unless it's different from mm->pasid to reduce overhead.
-        */
-       update_pasid();
 }
 
 #endif /* _ASM_X86_FPU_INTERNAL_H */
index 3236410..e7bef91 100644 (file)
@@ -99,6 +99,7 @@ KVM_X86_OP_NULL(post_block)
 KVM_X86_OP_NULL(vcpu_blocking)
 KVM_X86_OP_NULL(vcpu_unblocking)
 KVM_X86_OP_NULL(update_pi_irte)
+KVM_X86_OP_NULL(start_assignment)
 KVM_X86_OP_NULL(apicv_post_state_restore)
 KVM_X86_OP_NULL(dy_apicv_has_pending_interrupt)
 KVM_X86_OP_NULL(set_hv_timer)
index 55efbac..9c7ced0 100644 (file)
@@ -1352,6 +1352,7 @@ struct kvm_x86_ops {
 
        int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
                              uint32_t guest_irq, bool set);
+       void (*start_assignment)(struct kvm *kvm);
        void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
        bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
 
index ddbdefd..91a7b66 100644 (file)
@@ -3,11 +3,13 @@
 #define _ASM_X86_THERMAL_H
 
 #ifdef CONFIG_X86_THERMAL_VECTOR
+void therm_lvt_init(void);
 void intel_init_thermal(struct cpuinfo_x86 *c);
 bool x86_thermal_enabled(void);
 void intel_thermal_interrupt(void);
 #else
-static inline void intel_init_thermal(struct cpuinfo_x86 *c) { }
+static inline void therm_lvt_init(void)                                { }
+static inline void intel_init_thermal(struct cpuinfo_x86 *c)   { }
 #endif
 
 #endif /* _ASM_X86_THERMAL_H */
index 6974b51..6fe5b44 100644 (file)
@@ -183,41 +183,69 @@ done:
 }
 
 /*
+ * optimize_nops_range() - Optimize a sequence of single-byte NOPs (0x90)
+ *
+ * @instr: instruction byte stream
+ * @instrlen: length of the above
+ * @off: offset within @instr where the first NOP has been detected
+ *
+ * Return: number of NOPs found (and replaced).
+ */
+static __always_inline int optimize_nops_range(u8 *instr, u8 instrlen, int off)
+{
+       unsigned long flags;
+       int i = off, nnops;
+
+       while (i < instrlen) {
+               if (instr[i] != 0x90)
+                       break;
+
+               i++;
+       }
+
+       nnops = i - off;
+
+       if (nnops <= 1)
+               return nnops;
+
+       local_irq_save(flags);
+       add_nops(instr + off, nnops);
+       local_irq_restore(flags);
+
+       DUMP_BYTES(instr, instrlen, "%px: [%d:%d) optimized NOPs: ", instr, off, i);
+
+       return nnops;
+}
+
+/*
  * "noinline" to cause control flow change and thus invalidate I$ and
  * cause refetch after modification.
  */
 static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
 {
-       unsigned long flags;
        struct insn insn;
-       int nop, i = 0;
+       int i = 0;
 
        /*
-        * Jump over the non-NOP insns, the remaining bytes must be single-byte
-        * NOPs, optimize them.
+        * Jump over the non-NOP insns and optimize single-byte NOPs into bigger
+        * ones.
         */
        for (;;) {
                if (insn_decode_kernel(&insn, &instr[i]))
                        return;
 
+               /*
+                * See if this and any potentially following NOPs can be
+                * optimized.
+                */
                if (insn.length == 1 && insn.opcode.bytes[0] == 0x90)
-                       break;
-
-               if ((i += insn.length) >= a->instrlen)
-                       return;
-       }
+                       i += optimize_nops_range(instr, a->instrlen, i);
+               else
+                       i += insn.length;
 
-       for (nop = i; i < a->instrlen; i++) {
-               if (WARN_ONCE(instr[i] != 0x90, "Not a NOP at 0x%px\n", &instr[i]))
+               if (i >= a->instrlen)
                        return;
        }
-
-       local_irq_save(flags);
-       add_nops(instr + nop, i - nop);
-       local_irq_restore(flags);
-
-       DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
-                  instr, nop, a->instrlen);
 }
 
 /*
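
The rewritten optimize_nops() above no longer assumes that everything following the first single-byte NOP is NOPs; it decodes instruction by instruction and folds each run of 0x90 bytes in place through optimize_nops_range(). A toy model of the new control flow, where decode_len() and collapse_nop_run() are stand-ins for insn_decode_kernel() and optimize_nops_range():

    /* Toy sketch: length-decode the buffer and fold every NOP run. */
    static void optimize(u8 *buf, int len)
    {
            int i = 0;

            while (i < len) {
                    int n = decode_len(buf + i);    /* hypothetical decoder */

                    if (n == 1 && buf[i] == 0x90)
                            i += collapse_nop_run(buf, len, i);     /* >= 1 */
                    else
                            i += n;
            }
    }
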
index 4a39fb4..d262811 100644 (file)
@@ -2604,6 +2604,7 @@ static void __init apic_bsp_setup(bool upmode)
        end_local_APIC_setup();
        irq_remap_enable_fault_handling();
        setup_IO_APIC();
+       lapic_update_legacy_vectors();
 }
 
 #ifdef CONFIG_UP_LATE_INIT
index 6dbdc7c..fb67ed5 100644 (file)
@@ -738,6 +738,26 @@ void lapic_assign_legacy_vector(unsigned int irq, bool replace)
        irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
 }
 
+void __init lapic_update_legacy_vectors(void)
+{
+       unsigned int i;
+
+       if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0)
+               return;
+
+       /*
+        * If the IO/APIC is disabled via config, kernel command line or
+        * lack of enumeration, then all legacy interrupts are routed
+        * through the PIC. Make sure that they are marked as legacy
+        * vectors. PIC_CASCADE_IR has already been marked in
+        * lapic_assign_system_vectors().
+        */
+       for (i = 0; i < nr_legacy_irqs(); i++) {
+               if (i != PIC_CASCADE_IR)
+                       lapic_assign_legacy_vector(i, true);
+       }
+}
+
 void __init lapic_assign_system_vectors(void)
 {
        unsigned int i, vector = 0;
index 3ef5868..7aecb2f 100644 (file)
@@ -63,7 +63,7 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
                case 15:
                        return msr - MSR_P4_BPU_PERFCTR0;
                }
-               fallthrough;
+               break;
        case X86_VENDOR_ZHAOXIN:
        case X86_VENDOR_CENTAUR:
                return msr - MSR_ARCH_PERFMON_PERFCTR0;
@@ -96,7 +96,7 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
                case 15:
                        return msr - MSR_P4_BSU_ESCR0;
                }
-               fallthrough;
+               break;
        case X86_VENDOR_ZHAOXIN:
        case X86_VENDOR_CENTAUR:
                return msr - MSR_ARCH_PERFMON_EVENTSEL0;
index a85c640..d0eef96 100644 (file)
@@ -1402,60 +1402,3 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
        return 0;
 }
 #endif /* CONFIG_PROC_PID_ARCH_STATUS */
-
-#ifdef CONFIG_IOMMU_SUPPORT
-void update_pasid(void)
-{
-       u64 pasid_state;
-       u32 pasid;
-
-       if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
-               return;
-
-       if (!current->mm)
-               return;
-
-       pasid = READ_ONCE(current->mm->pasid);
-       /* Set the valid bit in the PASID MSR/state only for valid pasid. */
-       pasid_state = pasid == PASID_DISABLED ?
-                     pasid : pasid | MSR_IA32_PASID_VALID;
-
-       /*
-        * No need to hold fregs_lock() since the task's fpstate won't
-        * be changed by others (e.g. ptrace) while the task is being
-        * switched to or is in IPI.
-        */
-       if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
-               /* The MSR is active and can be directly updated. */
-               wrmsrl(MSR_IA32_PASID, pasid_state);
-       } else {
-               struct fpu *fpu = &current->thread.fpu;
-               struct ia32_pasid_state *ppasid_state;
-               struct xregs_state *xsave;
-
-               /*
-                * The CPU's xstate registers are not currently active. Just
-                * update the PASID state in the memory buffer here. The
-                * PASID MSR will be loaded when returning to user mode.
-                */
-               xsave = &fpu->state.xsave;
-               xsave->header.xfeatures |= XFEATURE_MASK_PASID;
-               ppasid_state = get_xsave_addr(xsave, XFEATURE_PASID);
-               /*
-                * Since XFEATURE_MASK_PASID is set in xfeatures, ppasid_state
-                * won't be NULL and no need to check its value.
-                *
-                * Only update the task's PASID state when it's different
-                * from the mm's pasid.
-                */
-               if (ppasid_state->pasid != pasid_state) {
-                       /*
-                        * Invalid fpregs so that state restoring will pick up
-                        * the PASID state.
-                        */
-                       __fpu_invalidate_fpregs_state(fpu);
-                       ppasid_state->pasid = pasid_state;
-               }
-       }
-}
-#endif /* CONFIG_IOMMU_SUPPORT */
index 72920af..1e72062 100644 (file)
@@ -44,6 +44,7 @@
 #include <asm/pci-direct.h>
 #include <asm/prom.h>
 #include <asm/proto.h>
+#include <asm/thermal.h>
 #include <asm/unwind.h>
 #include <asm/vsyscall.h>
 #include <linux/vmalloc.h>
@@ -637,11 +638,11 @@ static void __init trim_snb_memory(void)
         * them from accessing certain memory ranges, namely anything below
         * 1M and in the pages listed in bad_pages[] above.
         *
-        * To avoid these pages being ever accessed by SNB gfx devices
-        * reserve all memory below the 1 MB mark and bad_pages that have
-        * not already been reserved at boot time.
+        * To avoid these pages ever being accessed by SNB gfx devices,
+        * reserve bad_pages that have not already been reserved at boot time.
+        * All memory below the 1 MB mark is anyway reserved later during
+        * setup_arch(), so there is no need to reserve it here.
         */
-       memblock_reserve(0, 1<<20);
 
        for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
                if (memblock_reserve(bad_pages[i], PAGE_SIZE))
@@ -733,14 +734,14 @@ static void __init early_reserve_memory(void)
         * The first 4Kb of memory is a BIOS owned area, but generally it is
         * not listed as such in the E820 table.
         *
-        * Reserve the first memory page and typically some additional
-        * memory (64KiB by default) since some BIOSes are known to corrupt
-        * low memory. See the Kconfig help text for X86_RESERVE_LOW.
+        * Reserve the first 64K of memory since some BIOSes are known to
+        * corrupt low memory. After the real mode trampoline is allocated the
+        * rest of the memory below 640k is reserved.
         *
         * In addition, make sure page 0 is always reserved because on
         * systems with L1TF its contents can be leaked to user processes.
         */
-       memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
+       memblock_reserve(0, SZ_64K);
 
        early_reserve_initrd();
 
@@ -751,6 +752,7 @@ static void __init early_reserve_memory(void)
 
        reserve_ibft_region();
        reserve_bios_regions();
+       trim_snb_memory();
 }
 
 /*
@@ -1081,14 +1083,20 @@ void __init setup_arch(char **cmdline_p)
                        (max_pfn_mapped<<PAGE_SHIFT) - 1);
 #endif
 
-       reserve_real_mode();
-
        /*
-        * Reserving memory causing GPU hangs on Sandy Bridge integrated
-        * graphics devices should be done after we allocated memory under
-        * 1M for the real mode trampoline.
+        * Find free memory for the real mode trampoline and place it
+        * there.
+        * If there is not enough free memory under 1M, on EFI-enabled
+        * systems there will be an additional attempt to reclaim the memory
+        * for the real mode trampoline at efi_free_boot_services().
+        *
+        * Unconditionally reserve the entire first 1M of RAM because
+        * BIOSes are known to corrupt low memory and several hundred
+        * kilobytes are not worth complex detection of what memory gets
+        * clobbered. Moreover, on machines with SandyBridge graphics or in
+        * setups that use crashkernel the entire 1M is reserved anyway.
         */
-       trim_snb_memory();
+       reserve_real_mode();
 
        init_mem_mapping();
 
@@ -1226,6 +1234,14 @@ void __init setup_arch(char **cmdline_p)
 
        x86_init.timers.wallclock_init();
 
+       /*
+        * This needs to run before setup_local_APIC() which soft-disables the
+        * local APIC temporarily and that masks the thermal LVT interrupt,
+        * leading to softlockups on machines which have configured SMI
+        * interrupt delivery.
+        */
+       therm_lvt_init();
+
        mcheck_init();
 
        register_refined_jiffies(CLOCK_TICK_RATE);
index 9a48f13..b4da665 100644 (file)
@@ -655,6 +655,7 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
                if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
                        entry->ecx = F(RDPID);
                ++array->nent;
+               break;
        default:
                break;
        }
index 8a0ccdb..5e5de05 100644 (file)
@@ -5111,7 +5111,7 @@ done:
        return rc;
 }
 
-int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
 {
        int rc = X86EMUL_CONTINUE;
        int mode = ctxt->mode;
@@ -5322,7 +5322,8 @@ done_prefixes:
 
        ctxt->execute = opcode.u.execute;
 
-       if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
+       if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
+           likely(!(ctxt->d & EmulateOnUD)))
                return EMULATION_FAILED;
 
        if (unlikely(ctxt->d &
index f98370a..f00830e 100644 (file)
@@ -1172,6 +1172,7 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
 {
        struct kvm_hv *hv = to_kvm_hv(kvm);
        u64 gfn;
+       int idx;
 
        if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
            hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
@@ -1190,9 +1191,16 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
        gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
 
        hv->tsc_ref.tsc_sequence = 0;
+
+       /*
+        * Take the srcu lock as memslots will be accessed to check the gfn
+        * cache generation against the memslots generation.
+        */
+       idx = srcu_read_lock(&kvm->srcu);
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
                            &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
                hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
+       srcu_read_unlock(&kvm->srcu, idx);
 
 out_unlock:
        mutex_unlock(&hv->hv_lock);
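
The hunk above wraps the guest write in an SRCU read-side critical section because kvm_write_guest() looks up memslots, which are SRCU-protected. For reference, a minimal sketch of that read-side pattern, assuming a standalone SRCU domain; my_srcu and the reader body are illustrative, while srcu_read_lock()/srcu_read_unlock() are the real primitives:

#include <linux/srcu.h>

DEFINE_SRCU(my_srcu);                   /* illustrative SRCU domain */

static void reader_example(void)
{
        int idx;

        /* srcu_read_lock() returns an index that must be passed back. */
        idx = srcu_read_lock(&my_srcu);
        /* ... access SRCU-protected data, e.g. the memslots array ... */
        srcu_read_unlock(&my_srcu, idx);
}
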
index f016838..3e870bf 100644 (file)
@@ -314,7 +314,6 @@ struct x86_emulate_ctxt {
        int interruptibility;
 
        bool perm_ok; /* do not check permissions if true */
-       bool ud;        /* inject an #UD if host doesn't support insn */
        bool tf;        /* TF value before instruction (after for syscall/sysret) */
 
        bool have_exception;
@@ -491,7 +490,7 @@ enum x86_intercept {
 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
 #endif
 
-int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type);
 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
 #define EMULATION_FAILED -1
 #define EMULATION_OK 0
index c0ebef5..17fa4ab 100644 (file)
@@ -1410,6 +1410,9 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
        if (!apic_x2apic_mode(apic))
                valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);
 
+       if (alignment + len > 4)
+               return 1;
+
        if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
                return 1;
 
@@ -1494,6 +1497,15 @@ static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
 
 static void cancel_hv_timer(struct kvm_lapic *apic);
 
+static void cancel_apic_timer(struct kvm_lapic *apic)
+{
+       hrtimer_cancel(&apic->lapic_timer.timer);
+       preempt_disable();
+       if (apic->lapic_timer.hv_timer_in_use)
+               cancel_hv_timer(apic);
+       preempt_enable();
+}
+
 static void apic_update_lvtt(struct kvm_lapic *apic)
 {
        u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
@@ -1502,11 +1514,7 @@ static void apic_update_lvtt(struct kvm_lapic *apic)
        if (apic->lapic_timer.timer_mode != timer_mode) {
                if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
                                APIC_LVT_TIMER_TSCDEADLINE)) {
-                       hrtimer_cancel(&apic->lapic_timer.timer);
-                       preempt_disable();
-                       if (apic->lapic_timer.hv_timer_in_use)
-                               cancel_hv_timer(apic);
-                       preempt_enable();
+                       cancel_apic_timer(apic);
                        kvm_lapic_set_reg(apic, APIC_TMICT, 0);
                        apic->lapic_timer.period = 0;
                        apic->lapic_timer.tscdeadline = 0;
@@ -1598,11 +1606,19 @@ static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
        guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
        apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline;
 
+       if (lapic_timer_advance_dynamic) {
+               adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
+               /*
+                * If the timer fired early, reread the TSC to account for the
+                * overhead of the above adjustment to avoid waiting longer
+                * than is necessary.
+                */
+               if (guest_tsc < tsc_deadline)
+                       guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
+       }
+
        if (guest_tsc < tsc_deadline)
                __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
-
-       if (lapic_timer_advance_dynamic)
-               adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
 }
 
 void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
@@ -1661,7 +1677,7 @@ static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
        }
 
        atomic_inc(&apic->lapic_timer.pending);
-       kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+       kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
        if (from_timer_fn)
                kvm_vcpu_kick(vcpu);
 }
@@ -2084,7 +2100,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
                if (apic_lvtt_tscdeadline(apic))
                        break;
 
-               hrtimer_cancel(&apic->lapic_timer.timer);
+               cancel_apic_timer(apic);
                kvm_lapic_set_reg(apic, APIC_TMICT, val);
                start_apic_timer(apic);
                break;
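
The __kvm_wait_lapic_expire() hunk above runs the dynamic advance adjustment before the busy wait and then re-reads the guest TSC, because the adjustment itself consumes cycles that would otherwise be counted as remaining wait time. A hedged sketch of that control flow; read_guest_tsc(), adjust_advance() and wait_until() are hypothetical stand-ins for the KVM helpers:

static void wait_for_timer_example(u64 deadline, bool tuning_enabled)
{
        u64 now = read_guest_tsc();

        if (tuning_enabled) {
                adjust_advance((s64)(now - deadline));
                /*
                 * The adjustment costs time; re-sample the clock so an
                 * early-firing timer does not wait against a stale value.
                 */
                if (now < deadline)
                        now = read_guest_tsc();
        }

        if (now < deadline)
                wait_until(deadline);
}
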
index 0144c40..8d5876d 100644 (file)
@@ -4739,9 +4739,33 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
        context->inject_page_fault = kvm_inject_page_fault;
 }
 
+static union kvm_mmu_role kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu)
+{
+       union kvm_mmu_role role = kvm_calc_shadow_root_page_role_common(vcpu, false);
+
+       /*
+        * Nested MMUs are used only for walking L2's gva->gpa; they never have
+        * shadow pages of their own and so "direct" has no meaning. Set it
+        * to "true" to try to detect bogus usage of the nested MMU.
+        */
+       role.base.direct = true;
+
+       if (!is_paging(vcpu))
+               role.base.level = 0;
+       else if (is_long_mode(vcpu))
+               role.base.level = is_la57_mode(vcpu) ? PT64_ROOT_5LEVEL :
+                                                      PT64_ROOT_4LEVEL;
+       else if (is_pae(vcpu))
+               role.base.level = PT32E_ROOT_LEVEL;
+       else
+               role.base.level = PT32_ROOT_LEVEL;
+
+       return role;
+}
+
 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 {
-       union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
+       union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu);
        struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
 
        if (new_role.as_u64 == g_context->mmu_role.as_u64)
index 70b7e44..823a591 100644 (file)
@@ -90,8 +90,8 @@ struct guest_walker {
        gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
        pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
        bool pte_writable[PT_MAX_FULL_LEVELS];
-       unsigned pt_access;
-       unsigned pte_access;
+       unsigned int pt_access[PT_MAX_FULL_LEVELS];
+       unsigned int pte_access;
        gfn_t gfn;
        struct x86_exception fault;
 };
@@ -418,13 +418,15 @@ retry_walk:
                }
 
                walker->ptes[walker->level - 1] = pte;
+
+               /* Convert to ACC_*_MASK flags for struct guest_walker.  */
+               walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
        } while (!is_last_gpte(mmu, walker->level, pte));
 
        pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
        accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;
 
        /* Convert to ACC_*_MASK flags for struct guest_walker.  */
-       walker->pt_access = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
        walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
        errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
        if (unlikely(errcode))
@@ -463,7 +465,8 @@ retry_walk:
        }
 
        pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
-                __func__, (u64)pte, walker->pte_access, walker->pt_access);
+                __func__, (u64)pte, walker->pte_access,
+                walker->pt_access[walker->level - 1]);
        return 1;
 
 error:
@@ -643,7 +646,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
        bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
        struct kvm_mmu_page *sp = NULL;
        struct kvm_shadow_walk_iterator it;
-       unsigned direct_access, access = gw->pt_access;
+       unsigned int direct_access, access;
        int top_level, level, req_level, ret;
        gfn_t base_gfn = gw->gfn;
 
@@ -675,6 +678,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
                sp = NULL;
                if (!is_shadow_present_pte(*it.sptep)) {
                        table_gfn = gw->table_gfn[it.level - 2];
+                       access = gw->pt_access[it.level - 2];
                        sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
                                              false, access);
                }
index 95eeb5a..237317b 100644 (file)
@@ -1192,9 +1192,9 @@ bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 }
 
 /*
- * Remove write access from all the SPTEs mapping GFNs [start, end). If
- * skip_4k is set, SPTEs that map 4k pages, will not be write-protected.
- * Returns true if an SPTE has been changed and the TLBs need to be flushed.
+ * Remove write access from all SPTEs at or above min_level that map GFNs
+ * [start, end). Returns true if an SPTE has been changed and the TLBs need to
+ * be flushed.
  */
 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                             gfn_t start, gfn_t end, int min_level)
index 712b4e0..5e7e920 100644 (file)
 #include "svm.h"
 
 /* enable / disable AVIC */
-int avic;
-#ifdef CONFIG_X86_LOCAL_APIC
-module_param(avic, int, S_IRUGO);
-#endif
+bool avic;
+module_param(avic, bool, S_IRUGO);
 
 #define SVM_AVIC_DOORBELL      0xc001011b
 
@@ -223,7 +221,7 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
        return &avic_physical_id_table[index];
 }
 
-/**
+/*
  * Note:
  * AVIC hardware walks the nested page table to check permissions,
  * but does not use the SPA address specified in the leaf page
@@ -766,7 +764,7 @@ out:
        return ret;
 }
 
-/**
+/*
  * Note:
  * The HW cannot support posting multicast/broadcast
  * interrupts to a vCPU. So, we still use legacy interrupt
@@ -1007,7 +1005,7 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
        WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
 }
 
-/**
+/*
  * This function is called during VCPU halt/unhalt.
  */
 static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
index 5bc887e..8d36f0c 100644 (file)
@@ -199,9 +199,19 @@ static void sev_asid_free(struct kvm_sev_info *sev)
        sev->misc_cg = NULL;
 }
 
-static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+static void sev_decommission(unsigned int handle)
 {
        struct sev_data_decommission decommission;
+
+       if (!handle)
+               return;
+
+       decommission.handle = handle;
+       sev_guest_decommission(&decommission, NULL);
+}
+
+static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+{
        struct sev_data_deactivate deactivate;
 
        if (!handle)
@@ -214,9 +224,7 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
        sev_guest_deactivate(&deactivate, NULL);
        up_read(&sev_deactivate_lock);
 
-       /* decommission handle */
-       decommission.handle = handle;
-       sev_guest_decommission(&decommission, NULL);
+       sev_decommission(handle);
 }
 
 static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
@@ -341,8 +349,10 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
 
        /* Bind ASID to this guest */
        ret = sev_bind_asid(kvm, start.handle, error);
-       if (ret)
+       if (ret) {
+               sev_decommission(start.handle);
                goto e_free_session;
+       }
 
        /* return handle to userspace */
        params.handle = start.handle;
@@ -1103,10 +1113,9 @@ __sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
        struct sev_data_send_start data;
        int ret;
 
+       memset(&data, 0, sizeof(data));
        data.handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
-       if (ret < 0)
-               return ret;
 
        params->session_len = data.session_len;
        if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
@@ -1215,10 +1224,9 @@ __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
        struct sev_data_send_update_data data;
        int ret;
 
+       memset(&data, 0, sizeof(data));
        data.handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
-       if (ret < 0)
-               return ret;
 
        params->hdr_len = data.hdr_len;
        params->trans_len = data.trans_len;
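
Both send_start and send_update_data hunks apply the same two-part fix: zero the stack command buffer before handing it to the PSP firmware, and keep reading the returned lengths even when the command "fails", since the query-length protocol reports required sizes through the error path. Reduced to its core (a fragment, with names taken from the surrounding code):

        struct sev_data_send_start data;
        int ret;

        memset(&data, 0, sizeof(data)); /* no stack garbage to firmware */
        data.handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);

        /*
         * No early return on failure: session_len is valid precisely when
         * the firmware rejects the zero-length buffer and reports the size.
         */
        params->session_len = data.session_len;
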
index 05eca13..e088086 100644 (file)
@@ -1010,9 +1010,7 @@ static __init int svm_hardware_setup(void)
        }
 
        if (avic) {
-               if (!npt_enabled ||
-                   !boot_cpu_has(X86_FEATURE_AVIC) ||
-                   !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
+               if (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC)) {
                        avic = false;
                } else {
                        pr_info("AVIC enabled\n");
index 2c9ece6..2908c6a 100644 (file)
@@ -480,7 +480,7 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
 
 #define VMCB_AVIC_APIC_BAR_MASK                0xFFFFFFFFFF000ULL
 
-extern int avic;
+extern bool avic;
 
 static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
 {
index a61c015..4f83914 100644 (file)
@@ -1550,16 +1550,16 @@ TRACE_EVENT(kvm_nested_vmenter_failed,
        TP_ARGS(msg, err),
 
        TP_STRUCT__entry(
-               __field(const char *, msg)
+               __string(msg, msg)
                __field(u32, err)
        ),
 
        TP_fast_assign(
-               __entry->msg = msg;
+               __assign_str(msg, msg);
                __entry->err = err;
        ),
 
-       TP_printk("%s%s", __entry->msg, !__entry->err ? "" :
+       TP_printk("%s%s", __get_str(msg), !__entry->err ? "" :
                __print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS))
 );
 
index 8dee8a5..aa0e787 100644 (file)
@@ -90,8 +90,7 @@ static inline bool cpu_has_vmx_preemption_timer(void)
 
 static inline bool cpu_has_vmx_posted_intr(void)
 {
-       return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
-               vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
+       return vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
 }
 
 static inline bool cpu_has_load_ia32_efer(void)
index 4597486..5f81ef0 100644 (file)
@@ -238,6 +238,20 @@ bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
 
 
 /*
+ * Bail out of the block loop if the VM has an assigned
+ * device, but the blocking vCPU didn't reconfigure the
+ * PI.NV to the wakeup vector, i.e. the assigned device
+ * came along after the initial check in pi_pre_block().
+ */
+void vmx_pi_start_assignment(struct kvm *kvm)
+{
+       if (!irq_remapping_cap(IRQ_POSTING_CAP))
+               return;
+
+       kvm_make_all_cpus_request(kvm, KVM_REQ_UNBLOCK);
+}
+
+/*
  * pi_update_irte - set IRTE for Posted-Interrupts
  *
  * @kvm: kvm
index 0bdc413..7f7b232 100644 (file)
@@ -95,5 +95,6 @@ void __init pi_init_cpu(int cpu);
 bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu);
 int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq,
                   bool set);
+void vmx_pi_start_assignment(struct kvm *kvm);
 
 #endif /* __KVM_X86_VMX_POSTED_INTR_H */
index 4bceb5c..c2a779b 100644 (file)
@@ -4843,7 +4843,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct kvm_run *kvm_run = vcpu->run;
        u32 intr_info, ex_no, error_code;
-       unsigned long cr2, rip, dr6;
+       unsigned long cr2, dr6;
        u32 vect_info;
 
        vect_info = vmx->idt_vectoring_info;
@@ -4933,8 +4933,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
                vmx->vcpu.arch.event_exit_inst_len =
                        vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
                kvm_run->exit_reason = KVM_EXIT_DEBUG;
-               rip = kvm_rip_read(vcpu);
-               kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
+               kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
                kvm_run->debug.arch.exception = ex_no;
                break;
        case AC_VECTOR:
@@ -6248,6 +6247,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
        switch (kvm_get_apic_mode(vcpu)) {
        case LAPIC_MODE_INVALID:
                WARN_ONCE(true, "Invalid local APIC state");
+               break;
        case LAPIC_MODE_DISABLED:
                break;
        case LAPIC_MODE_XAPIC:
@@ -7721,6 +7721,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .nested_ops = &vmx_nested_ops,
 
        .update_pi_irte = pi_update_irte,
+       .start_assignment = vmx_pi_start_assignment,
 
 #ifdef CONFIG_X86_64
        .set_hv_timer = vmx_set_hv_timer,
index bbc4e04..e0f4a46 100644 (file)
@@ -3072,6 +3072,19 @@ static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
 {
        ++vcpu->stat.tlb_flush;
+
+       if (!tdp_enabled) {
+               /*
+                * A TLB flush on behalf of the guest is equivalent to
+                * INVPCID(all), toggling CR4.PGE, etc., which requires
+                * a forced sync of the shadow page tables.  Unload the
+                * entire MMU here and the subsequent load will sync the
+                * shadow page tables, and also flush the TLB.
+                */
+               kvm_mmu_unload(vcpu);
+               return;
+       }
+
        static_call(kvm_x86_tlb_flush_guest)(vcpu);
 }
 
@@ -3101,10 +3114,14 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
         * expensive IPIs.
         */
        if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) {
+               u8 st_preempted = xchg(&st->preempted, 0);
+
                trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
-                                      st->preempted & KVM_VCPU_FLUSH_TLB);
-               if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
+                                      st_preempted & KVM_VCPU_FLUSH_TLB);
+               if (st_preempted & KVM_VCPU_FLUSH_TLB)
                        kvm_vcpu_flush_tlb_guest(vcpu);
+       } else {
+               st->preempted = 0;
        }
 
        vcpu->arch.st.preempted = 0;
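
The record_steal_time() fix is a read-once pattern: xchg() atomically samples and clears the flag word in one step, so the tracepoint and the flush decision act on the same value instead of racing with the guest between two separate reads. The generic shape, with illustrative names (shared, FLAG_FLUSH, do_flush):

        /* Sample and clear the shared flags exactly once. */
        u8 flags = xchg(&shared->flags, 0);

        trace_flags_seen(flags & FLAG_FLUSH);   /* same value both times */
        if (flags & FLAG_FLUSH)
                do_flush();
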
@@ -7089,7 +7106,10 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
 
 static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
 {
-       emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
+       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+       vcpu->arch.hflags = emul_flags;
+       kvm_mmu_reset_context(vcpu);
 }
 
 static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
@@ -7226,6 +7246,11 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
        BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
        BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
 
+       ctxt->interruptibility = 0;
+       ctxt->have_exception = false;
+       ctxt->exception.vector = -1;
+       ctxt->perm_ok = false;
+
        init_decode_cache(ctxt);
        vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
 }
@@ -7561,14 +7586,7 @@ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
            kvm_vcpu_check_breakpoint(vcpu, &r))
                return r;
 
-       ctxt->interruptibility = 0;
-       ctxt->have_exception = false;
-       ctxt->exception.vector = -1;
-       ctxt->perm_ok = false;
-
-       ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
-
-       r = x86_decode_insn(ctxt, insn, insn_len);
+       r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);
 
        trace_kvm_emulate_insn_start(vcpu);
        ++vcpu->stat.insn_emulation;
@@ -8243,6 +8261,7 @@ void kvm_arch_exit(void)
        kvm_x86_ops.hardware_enable = NULL;
        kvm_mmu_module_exit();
        free_percpu(user_return_msrs);
+       kmem_cache_destroy(x86_emulator_cache);
        kmem_cache_destroy(x86_fpu_cache);
 #ifdef CONFIG_KVM_XEN
        static_key_deferred_flush(&kvm_xen_enabled);
@@ -8360,6 +8379,9 @@ static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
 
        vcpu->stat.directed_yield_attempted++;
 
+       if (single_task_running())
+               goto no_yield;
+
        rcu_read_lock();
        map = rcu_dereference(vcpu->kvm->arch.apic_map);
 
@@ -9496,7 +9518,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
                if (r <= 0)
                        break;
 
-               kvm_clear_request(KVM_REQ_PENDING_TIMER, vcpu);
+               kvm_clear_request(KVM_REQ_UNBLOCK, vcpu);
                if (kvm_cpu_has_pending_timer(vcpu))
                        kvm_inject_pending_timer_irqs(vcpu);
 
@@ -10115,8 +10137,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
        kvm_update_dr7(vcpu);
 
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
-               vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
-                       get_segment_base(vcpu, VCPU_SREG_CS);
+               vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu);
 
        /*
         * Trigger an rflags update that will inject or remove the trace
@@ -11499,7 +11520,8 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
 
 void kvm_arch_start_assignment(struct kvm *kvm)
 {
-       atomic_inc(&kvm->arch.assigned_device_count);
+       if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
+               static_call_cond(kvm_x86_start_assignment)(kvm);
 }
 EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
 
index 1c548ad..6bda7f6 100644 (file)
@@ -836,8 +836,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 
        if (si_code == SEGV_PKUERR)
                force_sig_pkuerr((void __user *)address, pkey);
-
-       force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+       else
+               force_sig_fault(SIGSEGV, si_code, (void __user *)address);
 
        local_irq_disable();
 }
index a9639f6..470b202 100644 (file)
@@ -504,10 +504,6 @@ void __init sme_enable(struct boot_params *bp)
 #define AMD_SME_BIT    BIT(0)
 #define AMD_SEV_BIT    BIT(1)
 
-       /* Check the SEV MSR whether SEV or SME is enabled */
-       sev_status   = __rdmsr(MSR_AMD64_SEV);
-       feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
-
        /*
         * Check for the SME/SEV feature:
         *   CPUID Fn8000_001F[EAX]
@@ -519,11 +515,16 @@ void __init sme_enable(struct boot_params *bp)
        eax = 0x8000001f;
        ecx = 0;
        native_cpuid(&eax, &ebx, &ecx, &edx);
-       if (!(eax & feature_mask))
+       /* Check whether SEV or SME is supported */
+       if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
                return;
 
        me_mask = 1UL << (ebx & 0x3f);
 
+       /* Check the SEV MSR whether SEV or SME is enabled */
+       sev_status   = __rdmsr(MSR_AMD64_SEV);
+       feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
+
        /* Check if memory encryption is enabled */
        if (feature_mask == AMD_SME_BIT) {
                /*
index 02dc646..2edd866 100644 (file)
@@ -779,4 +779,48 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
 
+#define RS690_LOWER_TOP_OF_DRAM2       0x30
+#define RS690_LOWER_TOP_OF_DRAM2_VALID 0x1
+#define RS690_UPPER_TOP_OF_DRAM2       0x31
+#define RS690_HTIU_NB_INDEX            0xA8
+#define RS690_HTIU_NB_INDEX_WR_ENABLE  0x100
+#define RS690_HTIU_NB_DATA             0xAC
+
+/*
+ * Some BIOS implementations support RAM above 4GB, but do not configure the
+ * PCI host to respond to bus master accesses for these addresses. These
+ * implementations set the TOP_OF_DRAM_SLOT1 register correctly, so PCI DMA
+ * works as expected for addresses below 4GB.
+ *
+ * Reference: "AMD RS690 ASIC Family Register Reference Guide" (pg. 2-57)
+ * https://www.amd.com/system/files/TechDocs/43372_rs690_rrg_3.00o.pdf
+ */
+static void rs690_fix_64bit_dma(struct pci_dev *pdev)
+{
+       u32 val = 0;
+       phys_addr_t top_of_dram = __pa(high_memory - 1) + 1;
+
+       if (top_of_dram <= (1ULL << 32))
+               return;
+
+       pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+                               RS690_LOWER_TOP_OF_DRAM2);
+       pci_read_config_dword(pdev, RS690_HTIU_NB_DATA, &val);
+
+       if (val)
+               return;
+
+       pci_info(pdev, "Adjusting top of DRAM to %pa for 64-bit DMA support\n", &top_of_dram);
+
+       pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+               RS690_UPPER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
+       pci_write_config_dword(pdev, RS690_HTIU_NB_DATA, top_of_dram >> 32);
+
+       pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+               RS690_LOWER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
+       pci_write_config_dword(pdev, RS690_HTIU_NB_DATA,
+               top_of_dram | RS690_LOWER_TOP_OF_DRAM2_VALID);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
+
 #endif
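
rs690_fix_64bit_dma() drives the northbridge through an index/data register pair in PCI config space: write the register number to RS690_HTIU_NB_INDEX (with the write-enable bit set for stores), then move the payload through RS690_HTIU_NB_DATA. Factored into helpers as a hedged sketch; the RS690_* constants are the real ones from the hunk, the helper names are invented:

static u32 rs690_htiu_read(struct pci_dev *pdev, u32 reg)
{
        u32 val;

        pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX, reg);
        pci_read_config_dword(pdev, RS690_HTIU_NB_DATA, &val);
        return val;
}

static void rs690_htiu_write(struct pci_dev *pdev, u32 reg, u32 val)
{
        pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
                               reg | RS690_HTIU_NB_INDEX_WR_ENABLE);
        pci_write_config_dword(pdev, RS690_HTIU_NB_DATA, val);
}
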
index 7850111..b15ebfe 100644 (file)
@@ -450,6 +450,18 @@ void __init efi_free_boot_services(void)
                        size -= rm_size;
                }
 
+               /*
+                * Don't free memory under 1M for two reasons:
+                * - BIOS might clobber it
+                * - Crash kernel needs it to be reserved
+                */
+               if (start + size < SZ_1M)
+                       continue;
+               if (start < SZ_1M) {
+                       size -= (SZ_1M - start);
+                       start = SZ_1M;
+               }
+
                memblock_free_late(start, size);
        }
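
The new boundary handling is a clamp against the 1M line: regions entirely below it stay reserved, and regions straddling it are trimmed so only the part above 1M is freed. Isolated from the loop for clarity, with start and size as in the surrounding code:

        if (start + size < SZ_1M)
                continue;       /* fully below 1M: keep it reserved */
        if (start < SZ_1M) {
                /* Free only the portion above the 1M line. */
                size -= SZ_1M - start;
                start = SZ_1M;
        }
        memblock_free_late(start, size);
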
 
index 2e1c1be..6534c92 100644 (file)
@@ -29,14 +29,16 @@ void __init reserve_real_mode(void)
 
        /* Has to be under 1M so we can execute real-mode AP code. */
        mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
-       if (!mem) {
+       if (!mem)
                pr_info("No sub-1M memory is available for the trampoline\n");
-               return;
-       }
+       else
+               set_real_mode_mem(mem);
 
-       memblock_reserve(mem, size);
-       set_real_mode_mem(mem);
-       crash_reserve_low_1M();
+       /*
+        * Unconditionally reserve the entire first 1M; see the comment in
+        * setup_arch().
+        */
+       memblock_reserve(0, SZ_1M);
 }
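
Note the ordering in reserve_real_mode(): memblock_find_in_range() only returns memory that is still free, so the trampoline must be located before the blanket reservation that now covers it. A condensed sketch of that dependency (size as in the function):

        /* Find first: a reserved range is invisible to the allocator. */
        mem = memblock_find_in_range(0, SZ_1M, size, PAGE_SIZE);
        if (mem)
                set_real_mode_mem(mem);

        /* Then reserve the whole first 1M, trampoline included. */
        memblock_reserve(0, SZ_1M);
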
 
 static void sme_sev_setup_real_mode(struct trampoline_header *th)
index 6cd7f70..d8a9152 100644 (file)
@@ -233,7 +233,8 @@ async_xor_offs(struct page *dest, unsigned int offset,
                if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
                        src_cnt--;
                        src_list++;
-                       src_offs++;
+                       if (src_offs)
+                               src_offs++;
                }
 
                /* wait for any prerequisite operations */
index 0ec5b3f..6e02448 100644 (file)
@@ -226,6 +226,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
        { "AMDI0010", APD_ADDR(wt_i2c_desc) },
        { "AMD0020", APD_ADDR(cz_uart_desc) },
        { "AMDI0020", APD_ADDR(cz_uart_desc) },
+       { "AMDI0022", APD_ADDR(cz_uart_desc) },
        { "AMD0030", },
        { "AMD0040", APD_ADDR(fch_misc_desc)},
        { "HYGO0010", APD_ADDR(wt_i2c_desc) },
index 624a267..e5ba979 100644 (file)
@@ -285,6 +285,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
                }
                break;
 
+       case ACPI_TYPE_LOCAL_ADDRESS_HANDLER:
+
+               ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+                                 "***** Address handler %p\n", object));
+
+               acpi_os_delete_mutex(object->address_space.context_mutex);
+               break;
+
        default:
 
                break;
index be7da23..a4bd673 100644 (file)
@@ -330,32 +330,21 @@ static void acpi_bus_osc_negotiate_platform_control(void)
        if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
                return;
 
-       capbuf_ret = context.ret.pointer;
-       if (context.ret.length <= OSC_SUPPORT_DWORD) {
-               kfree(context.ret.pointer);
-               return;
-       }
+       kfree(context.ret.pointer);
 
-       /*
-        * Now run _OSC again with query flag clear and with the caps
-        * supported by both the OS and the platform.
-        */
+       /* Now run _OSC again with query flag clear */
        capbuf[OSC_QUERY_DWORD] = 0;
-       capbuf[OSC_SUPPORT_DWORD] = capbuf_ret[OSC_SUPPORT_DWORD];
-       kfree(context.ret.pointer);
 
        if (ACPI_FAILURE(acpi_run_osc(handle, &context)))
                return;
 
        capbuf_ret = context.ret.pointer;
-       if (context.ret.length > OSC_SUPPORT_DWORD) {
-               osc_sb_apei_support_acked =
-                       capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
-               osc_pc_lpi_support_confirmed =
-                       capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
-               osc_sb_native_usb4_support_confirmed =
-                       capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
-       }
+       osc_sb_apei_support_acked =
+               capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
+       osc_pc_lpi_support_confirmed =
+               capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
+       osc_sb_native_usb4_support_confirmed =
+               capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
 
        kfree(context.ret.pointer);
 }
index f973bbe..e21611c 100644 (file)
@@ -134,7 +134,7 @@ int acpi_power_init(void);
 void acpi_power_resources_list_free(struct list_head *list);
 int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
                                 struct list_head *list);
-int acpi_add_power_resource(acpi_handle handle);
+struct acpi_device *acpi_add_power_resource(acpi_handle handle);
 void acpi_power_add_remove_device(struct acpi_device *adev, bool add);
 int acpi_power_wakeup_list_init(struct list_head *list, int *system_level);
 int acpi_device_sleep_wake(struct acpi_device *dev,
@@ -142,7 +142,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
 int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
 int acpi_power_on_resources(struct acpi_device *device, int state);
 int acpi_power_transition(struct acpi_device *device, int state);
-void acpi_turn_off_unused_power_resources(void);
+void acpi_turn_off_unused_power_resources(bool init);
 
 /* --------------------------------------------------------------------------
                               Device Power Management
index 56102ea..97c9a94 100644 (file)
@@ -52,6 +52,7 @@ struct acpi_power_resource {
        u32 system_level;
        u32 order;
        unsigned int ref_count;
+       unsigned int users;
        bool wakeup_enabled;
        struct mutex resource_lock;
        struct list_head dependents;
@@ -147,6 +148,7 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
 
        for (i = start; i < package->package.count; i++) {
                union acpi_object *element = &package->package.elements[i];
+               struct acpi_device *rdev;
                acpi_handle rhandle;
 
                if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
@@ -163,13 +165,16 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
                if (acpi_power_resource_is_dup(package, start, i))
                        continue;
 
-               err = acpi_add_power_resource(rhandle);
-               if (err)
+               rdev = acpi_add_power_resource(rhandle);
+               if (!rdev) {
+                       err = -ENODEV;
                        break;
-
+               }
                err = acpi_power_resources_list_add(rhandle, list);
                if (err)
                        break;
+
+               to_power_resource(rdev)->users++;
        }
        if (err)
                acpi_power_resources_list_free(list);
@@ -907,7 +912,7 @@ static void acpi_power_add_resource_to_list(struct acpi_power_resource *resource
        mutex_unlock(&power_resource_list_lock);
 }
 
-int acpi_add_power_resource(acpi_handle handle)
+struct acpi_device *acpi_add_power_resource(acpi_handle handle)
 {
        struct acpi_power_resource *resource;
        struct acpi_device *device = NULL;
@@ -918,11 +923,11 @@ int acpi_add_power_resource(acpi_handle handle)
 
        acpi_bus_get_device(handle, &device);
        if (device)
-               return 0;
+               return device;
 
        resource = kzalloc(sizeof(*resource), GFP_KERNEL);
        if (!resource)
-               return -ENOMEM;
+               return NULL;
 
        device = &resource->device;
        acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER);
@@ -959,11 +964,11 @@ int acpi_add_power_resource(acpi_handle handle)
 
        acpi_power_add_resource_to_list(resource);
        acpi_device_add_finalize(device);
-       return 0;
+       return device;
 
  err:
        acpi_release_power_resource(&device->dev);
-       return result;
+       return NULL;
 }
 
 #ifdef CONFIG_ACPI_SLEEP
@@ -997,7 +1002,38 @@ void acpi_resume_power_resources(void)
 }
 #endif
 
-void acpi_turn_off_unused_power_resources(void)
+static void acpi_power_turn_off_if_unused(struct acpi_power_resource *resource,
+                                      bool init)
+{
+       if (resource->ref_count > 0)
+               return;
+
+       if (init) {
+               if (resource->users > 0)
+                       return;
+       } else {
+               int result, state;
+
+               result = acpi_power_get_state(resource->device.handle, &state);
+               if (result || state == ACPI_POWER_RESOURCE_STATE_OFF)
+                       return;
+       }
+
+       dev_info(&resource->device.dev, "Turning OFF\n");
+       __acpi_power_off(resource);
+}
+
+/**
+ * acpi_turn_off_unused_power_resources - Turn off power resources not in use.
+ * @init: Whether or not this is called during system initialization.
+ *
+ * If @init is set, unconditionally turn off all of the ACPI power resources
+ * without any users.
+ *
+ * Otherwise, turn off all ACPI power resources without active references (that
+ * is, the ones that should be "off" at the moment) that are "on".
+ */
+void acpi_turn_off_unused_power_resources(bool init)
 {
        struct acpi_power_resource *resource;
 
@@ -1006,10 +1042,7 @@ void acpi_turn_off_unused_power_resources(void)
        list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
                mutex_lock(&resource->resource_lock);
 
-               if (!resource->ref_count) {
-                       dev_info(&resource->device.dev, "Turning OFF\n");
-                       __acpi_power_off(resource);
-               }
+               acpi_power_turn_off_if_unused(resource, init);
 
                mutex_unlock(&resource->resource_lock);
        }
index 453eff8..e10d38a 100644 (file)
@@ -2360,7 +2360,7 @@ int __init acpi_scan_init(void)
                }
        }
 
-       acpi_turn_off_unused_power_resources();
+       acpi_turn_off_unused_power_resources(true);
 
        acpi_scan_initialized = true;
 
index 09fd137..3bb2ade 100644 (file)
@@ -504,7 +504,7 @@ static void acpi_pm_start(u32 acpi_state)
  */
 static void acpi_pm_end(void)
 {
-       acpi_turn_off_unused_power_resources();
+       acpi_turn_off_unused_power_resources(false);
        acpi_scan_lock_release();
        /*
         * This is necessary in case acpi_pm_finish() is not called during a
@@ -1009,10 +1009,8 @@ static void acpi_sleep_hibernate_setup(void)
                return;
 
        acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
-       if (facs) {
+       if (facs)
                s4_hardware_signature = facs->hardware_signature;
-               acpi_put_table((struct acpi_table_header *)facs);
-       }
 }
 #else /* !CONFIG_HIBERNATION */
 static inline void acpi_sleep_hibernate_setup(void) {}
index b6836bf..2a61003 100644 (file)
@@ -194,6 +194,17 @@ int device_links_read_lock_held(void)
 {
        return srcu_read_lock_held(&device_links_srcu);
 }
+
+static void device_link_synchronize_removal(void)
+{
+       synchronize_srcu(&device_links_srcu);
+}
+
+static void device_link_remove_from_lists(struct device_link *link)
+{
+       list_del_rcu(&link->s_node);
+       list_del_rcu(&link->c_node);
+}
 #else /* !CONFIG_SRCU */
 static DECLARE_RWSEM(device_links_lock);
 
@@ -224,6 +235,16 @@ int device_links_read_lock_held(void)
        return lockdep_is_held(&device_links_lock);
 }
 #endif
+
+static inline void device_link_synchronize_removal(void)
+{
+}
+
+static void device_link_remove_from_lists(struct device_link *link)
+{
+       list_del(&link->s_node);
+       list_del(&link->c_node);
+}
 #endif /* !CONFIG_SRCU */
 
 static bool device_is_ancestor(struct device *dev, struct device *target)
@@ -445,8 +466,13 @@ static struct attribute *devlink_attrs[] = {
 };
 ATTRIBUTE_GROUPS(devlink);
 
-static void device_link_free(struct device_link *link)
+static void device_link_release_fn(struct work_struct *work)
 {
+       struct device_link *link = container_of(work, struct device_link, rm_work);
+
+       /* Ensure that all references to the link object have been dropped. */
+       device_link_synchronize_removal();
+
        while (refcount_dec_not_one(&link->rpm_active))
                pm_runtime_put(link->supplier);
 
@@ -455,24 +481,19 @@ static void device_link_free(struct device_link *link)
        kfree(link);
 }
 
-#ifdef CONFIG_SRCU
-static void __device_link_free_srcu(struct rcu_head *rhead)
-{
-       device_link_free(container_of(rhead, struct device_link, rcu_head));
-}
-
 static void devlink_dev_release(struct device *dev)
 {
        struct device_link *link = to_devlink(dev);
 
-       call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
-}
-#else
-static void devlink_dev_release(struct device *dev)
-{
-       device_link_free(to_devlink(dev));
+       INIT_WORK(&link->rm_work, device_link_release_fn);
+       /*
+        * It may take a while to complete this work because of the SRCU
+        * synchronization in device_link_release_fn() and if the consumer or
+        * supplier devices get deleted when it runs, so put it into the "long"
+        * workqueue.
+        */
+       queue_work(system_long_wq, &link->rm_work);
 }
-#endif
 
 static struct class devlink_class = {
        .name = "devlink",
@@ -846,7 +867,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(device_link_add);
 
-#ifdef CONFIG_SRCU
 static void __device_link_del(struct kref *kref)
 {
        struct device_link *link = container_of(kref, struct device_link, kref);
@@ -856,25 +876,9 @@ static void __device_link_del(struct kref *kref)
 
        pm_runtime_drop_link(link);
 
-       list_del_rcu(&link->s_node);
-       list_del_rcu(&link->c_node);
-       device_unregister(&link->link_dev);
-}
-#else /* !CONFIG_SRCU */
-static void __device_link_del(struct kref *kref)
-{
-       struct device_link *link = container_of(kref, struct device_link, kref);
-
-       dev_info(link->consumer, "Dropping the link to %s\n",
-                dev_name(link->supplier));
-
-       pm_runtime_drop_link(link);
-
-       list_del(&link->s_node);
-       list_del(&link->c_node);
+       device_link_remove_from_lists(link);
        device_unregister(&link->link_dev);
 }
-#endif /* !CONFIG_SRCU */
 
 static void device_link_put_kref(struct device_link *link)
 {
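
devlink_dev_release() can no longer free the link synchronously: waiting for SRCU readers may sleep, and the consumer or supplier devices may be mid-teardown, so the real work is bounced to the long workqueue. The skeleton of that idiom with a hypothetical object type and SRCU domain (usual srcu/workqueue/slab headers assumed):

DEFINE_SRCU(my_obj_srcu);               /* illustrative domain */

struct my_obj {
        struct work_struct rm_work;
        /* ... payload ... */
};

static void my_obj_release_fn(struct work_struct *work)
{
        struct my_obj *obj = container_of(work, struct my_obj, rm_work);

        synchronize_srcu(&my_obj_srcu); /* wait out all readers; may sleep */
        kfree(obj);
}

static void my_obj_release(struct my_obj *obj)
{
        INIT_WORK(&obj->rm_work, my_obj_release_fn);
        /* May block for a while; use the long workqueue. */
        queue_work(system_long_wq, &obj->rm_work);
}
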
index b31b3af..d5ffaab 100644 (file)
@@ -218,14 +218,14 @@ static int memory_block_offline(struct memory_block *mem)
        struct zone *zone;
        int ret;
 
-       zone = page_zone(pfn_to_page(start_pfn));
-
        /*
         * Unaccount before offlining, such that unpopulated zone and kthreads
         * can properly be torn down in offline_pages().
         */
-       if (nr_vmemmap_pages)
+       if (nr_vmemmap_pages) {
+               zone = page_zone(pfn_to_page(start_pfn));
                adjust_present_page_count(zone, -nr_vmemmap_pages);
+       }
 
        ret = offline_pages(start_pfn + nr_vmemmap_pages,
                            nr_pages - nr_vmemmap_pages);
index d58d68f..76e12f3 100644 (file)
@@ -1879,29 +1879,18 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
 
 static int lo_open(struct block_device *bdev, fmode_t mode)
 {
-       struct loop_device *lo;
+       struct loop_device *lo = bdev->bd_disk->private_data;
        int err;
 
-       /*
-        * take loop_ctl_mutex to protect lo pointer from race with
-        * loop_control_ioctl(LOOP_CTL_REMOVE), however, to reduce contention
-        * release it prior to updating lo->lo_refcnt.
-        */
-       err = mutex_lock_killable(&loop_ctl_mutex);
-       if (err)
-               return err;
-       lo = bdev->bd_disk->private_data;
-       if (!lo) {
-               mutex_unlock(&loop_ctl_mutex);
-               return -ENXIO;
-       }
        err = mutex_lock_killable(&lo->lo_mutex);
-       mutex_unlock(&loop_ctl_mutex);
        if (err)
                return err;
-       atomic_inc(&lo->lo_refcnt);
+       if (lo->lo_state == Lo_deleting)
+               err = -ENXIO;
+       else
+               atomic_inc(&lo->lo_refcnt);
        mutex_unlock(&lo->lo_mutex);
-       return 0;
+       return err;
 }
 
 static void lo_release(struct gendisk *disk, fmode_t mode)
@@ -2285,7 +2274,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
                        mutex_unlock(&lo->lo_mutex);
                        break;
                }
-               lo->lo_disk->private_data = NULL;
+               lo->lo_state = Lo_deleting;
                mutex_unlock(&lo->lo_mutex);
                idr_remove(&loop_index_idr, lo->lo_number);
                loop_remove(lo);
index a3c04f3..5beb959 100644 (file)
@@ -22,6 +22,7 @@ enum {
        Lo_unbound,
        Lo_bound,
        Lo_rundown,
+       Lo_deleting,
 };
 
 struct loop_func_table;
index b88c63f..7f6ba2c 100644 (file)
@@ -388,6 +388,8 @@ static const struct usb_device_id blacklist_table[] = {
        /* Realtek 8822CE Bluetooth devices */
        { USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK |
                                                     BTUSB_WIDEBAND_SPEECH },
+       { USB_DEVICE(0x0bda, 0xc822), .driver_info = BTUSB_REALTEK |
+                                                    BTUSB_WIDEBAND_SPEECH },
 
        /* Realtek 8852AE Bluetooth devices */
        { USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK |
index 7c810f0..b3357a8 100644 (file)
@@ -311,8 +311,8 @@ static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
        MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
        MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
        MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
-       MHI_CHANNEL_CONFIG_UL(32, "AT", 32, 0),
-       MHI_CHANNEL_CONFIG_DL(33, "AT", 32, 0),
+       MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+       MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
        MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
        MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
 };
@@ -708,7 +708,7 @@ static void mhi_pci_remove(struct pci_dev *pdev)
        struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
        struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
 
-       del_timer(&mhi_pdev->health_check_timer);
+       del_timer_sync(&mhi_pdev->health_check_timer);
        cancel_work_sync(&mhi_pdev->recovery_work);
 
        if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
@@ -935,9 +935,43 @@ static int __maybe_unused mhi_pci_resume(struct device *dev)
        return ret;
 }
 
+static int __maybe_unused mhi_pci_freeze(struct device *dev)
+{
+       struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+       struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+       /* We want to stop all operations because hibernation does not
+        * guarantee that the device will be in the same state as before
+        * freezing, especially if the intermediate restore kernel
+        * reinitializes the MHI device with a new context.
+        */
+       if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+               mhi_power_down(mhi_cntrl, false);
+               mhi_unprepare_after_power_down(mhi_cntrl);
+       }
+
+       return 0;
+}
+
+static int __maybe_unused mhi_pci_restore(struct device *dev)
+{
+       struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+
+       /* Reinitialize the device */
+       queue_work(system_long_wq, &mhi_pdev->recovery_work);
+
+       return 0;
+}
+
 static const struct dev_pm_ops mhi_pci_pm_ops = {
        SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
-       SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
+#ifdef CONFIG_PM_SLEEP
+       .suspend = mhi_pci_suspend,
+       .resume = mhi_pci_resume,
+       .freeze = mhi_pci_freeze,
+       .thaw = mhi_pci_restore,
+       .restore = mhi_pci_restore,
+#endif
 };
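
The macro being replaced matters here: under CONFIG_PM_SLEEP, SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) assigns the same pair to all six sleep callbacks, roughly (a hedged paraphrase of the 5.13-era pm.h):

        .suspend  = suspend_fn,
        .freeze   = suspend_fn,
        .poweroff = suspend_fn,
        .resume   = resume_fn,
        .thaw     = resume_fn,
        .restore  = resume_fn,

which is exactly what hibernation must avoid for MHI: .freeze powers the device fully down, and .thaw/.restore rebuild it through the recovery work instead of a plain resume.
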
 
 static struct pci_driver mhi_pci_driver = {
index 5fae60f..38cb116 100644 (file)
@@ -1334,6 +1334,34 @@ err_allow_idle:
        return error;
 }
 
+static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
+{
+       struct device *dev = ddata->dev;
+       int error;
+
+       /* Disable target module if it is enabled */
+       if (ddata->enabled) {
+               error = sysc_runtime_suspend(dev);
+               if (error)
+                       dev_warn(dev, "reinit suspend failed: %i\n", error);
+       }
+
+       /* Enable target module */
+       error = sysc_runtime_resume(dev);
+       if (error)
+               dev_warn(dev, "reinit resume failed: %i\n", error);
+
+       if (leave_enabled)
+               return error;
+
+       /* Disable target module if leave_enabled was not set */
+       error = sysc_runtime_suspend(dev);
+       if (error)
+               dev_warn(dev, "reinit suspend failed: %i\n", error);
+
+       return error;
+}
+
 static int __maybe_unused sysc_noirq_suspend(struct device *dev)
 {
        struct sysc *ddata;
@@ -1344,12 +1372,18 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev)
            (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
                return 0;
 
-       return pm_runtime_force_suspend(dev);
+       if (!ddata->enabled)
+               return 0;
+
+       ddata->needs_resume = 1;
+
+       return sysc_runtime_suspend(dev);
 }
 
 static int __maybe_unused sysc_noirq_resume(struct device *dev)
 {
        struct sysc *ddata;
+       int error = 0;
 
        ddata = dev_get_drvdata(dev);
 
@@ -1357,7 +1391,19 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev)
            (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
                return 0;
 
-       return pm_runtime_force_resume(dev);
+       if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) {
+               error = sysc_reinit_module(ddata, ddata->needs_resume);
+               if (error)
+                       dev_warn(dev, "noirq_resume failed: %i\n", error);
+       } else if (ddata->needs_resume) {
+               error = sysc_runtime_resume(dev);
+               if (error)
+                       dev_warn(dev, "noirq_resume failed: %i\n", error);
+       }
+
+       ddata->needs_resume = 0;
+
+       return error;
 }
 
 static const struct dev_pm_ops sysc_pm_ops = {
@@ -1408,9 +1454,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
                   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
        /* Uarts on omap4 and later */
        SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
-                  SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+                  SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
        SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
-                  SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+                  SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
 
        /* Quirks that need to be set based on the module address */
        SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
@@ -1459,6 +1505,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
                   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
        SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
                   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+       SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff,
+                  SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
        SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
                   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
        SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
@@ -1466,7 +1514,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
        SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
                   0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
        SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
-                  SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+                  SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
+                  SYSC_QUIRK_REINIT_ON_RESUME),
        SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
                   SYSC_MODULE_QUIRK_WDT),
        /* PRUSS on am3, am4 and am5 */
@@ -1524,7 +1573,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
        SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0),
        SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
        SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
-       SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff, 0),
        SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
        SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0),
        SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0),
index a5c5f70..e65e0a4 100644 (file)
@@ -19,16 +19,6 @@ config ACPI_CPPC_CPUFREQ
 
          If in doubt, say N.
 
-config ACPI_CPPC_CPUFREQ_FIE
-       bool "Frequency Invariance support for CPPC cpufreq driver"
-       depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
-       default y
-       help
-         This extends frequency invariance support in the CPPC cpufreq driver,
-         by using CPPC delivered and reference performance counters.
-
-         If in doubt, say N.
-
 config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
        tristate "Allwinner nvmem based SUN50I CPUFreq driver"
        depends on ARCH_SUNXI
index 3848b4c..2f769b1 100644 (file)
 
 #define pr_fmt(fmt)    "CPPC Cpufreq:" fmt
 
-#include <linux/arch_topology.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/dmi.h>
-#include <linux/irq_work.h>
-#include <linux/kthread.h>
 #include <linux/time.h>
 #include <linux/vmalloc.h>
-#include <uapi/linux/sched/types.h>
 
 #include <asm/unaligned.h>
 
@@ -61,204 +57,6 @@ static struct cppc_workaround_oem_info wa_info[] = {
        }
 };
 
-#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
-
-/* Frequency invariance support */
-struct cppc_freq_invariance {
-       int cpu;
-       struct irq_work irq_work;
-       struct kthread_work work;
-       struct cppc_perf_fb_ctrs prev_perf_fb_ctrs;
-       struct cppc_cpudata *cpu_data;
-};
-
-static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
-static struct kthread_worker *kworker_fie;
-static bool fie_disabled;
-
-static struct cpufreq_driver cppc_cpufreq_driver;
-static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
-                                struct cppc_perf_fb_ctrs fb_ctrs_t0,
-                                struct cppc_perf_fb_ctrs fb_ctrs_t1);
-
-/**
- * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
- * @work: The work item.
- *
- * The CPPC driver register itself with the topology core to provide its own
- * implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick() which
- * gets called by the scheduler on every tick.
- *
- * Note that the arch specific counters have higher priority than CPPC counters,
- * if available, though the CPPC driver doesn't need to have any special
- * handling for that.
- *
- * On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
- * reach here from hard-irq context), which then schedules a normal work item
- * and cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable
- * based on the counter updates since the last tick.
- */
-static void cppc_scale_freq_workfn(struct kthread_work *work)
-{
-       struct cppc_freq_invariance *cppc_fi;
-       struct cppc_perf_fb_ctrs fb_ctrs = {0};
-       struct cppc_cpudata *cpu_data;
-       unsigned long local_freq_scale;
-       u64 perf;
-
-       cppc_fi = container_of(work, struct cppc_freq_invariance, work);
-       cpu_data = cppc_fi->cpu_data;
-
-       if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
-               pr_warn("%s: failed to read perf counters\n", __func__);
-               return;
-       }
-
-       cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
-       perf = cppc_perf_from_fbctrs(cpu_data, cppc_fi->prev_perf_fb_ctrs,
-                                    fb_ctrs);
-
-       perf <<= SCHED_CAPACITY_SHIFT;
-       local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf);
-       if (WARN_ON(local_freq_scale > 1024))
-               local_freq_scale = 1024;
-
-       per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
-}
-
-static void cppc_irq_work(struct irq_work *irq_work)
-{
-       struct cppc_freq_invariance *cppc_fi;
-
-       cppc_fi = container_of(irq_work, struct cppc_freq_invariance, irq_work);
-       kthread_queue_work(kworker_fie, &cppc_fi->work);
-}
-
-static void cppc_scale_freq_tick(void)
-{
-       struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());
-
-       /*
-        * cppc_get_perf_ctrs() can potentially sleep, call that from the right
-        * context.
-        */
-       irq_work_queue(&cppc_fi->irq_work);
-}
-
-static struct scale_freq_data cppc_sftd = {
-       .source = SCALE_FREQ_SOURCE_CPPC,
-       .set_freq_scale = cppc_scale_freq_tick,
-};
-
-static void cppc_freq_invariance_policy_init(struct cpufreq_policy *policy,
-                                            struct cppc_cpudata *cpu_data)
-{
-       struct cppc_perf_fb_ctrs fb_ctrs = {0};
-       struct cppc_freq_invariance *cppc_fi;
-       int i, ret;
-
-       if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
-               return;
-
-       if (fie_disabled)
-               return;
-
-       for_each_cpu(i, policy->cpus) {
-               cppc_fi = &per_cpu(cppc_freq_inv, i);
-               cppc_fi->cpu = i;
-               cppc_fi->cpu_data = cpu_data;
-               kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
-               init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
-
-               ret = cppc_get_perf_ctrs(i, &fb_ctrs);
-               if (ret) {
-                       pr_warn("%s: failed to read perf counters: %d\n",
-                               __func__, ret);
-                       fie_disabled = true;
-               } else {
-                       cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
-               }
-       }
-}
-
-static void __init cppc_freq_invariance_init(void)
-{
-       struct sched_attr attr = {
-               .size           = sizeof(struct sched_attr),
-               .sched_policy   = SCHED_DEADLINE,
-               .sched_nice     = 0,
-               .sched_priority = 0,
-               /*
-                * Fake (unused) bandwidth; workaround to "fix"
-                * priority inheritance.
-                */
-               .sched_runtime  = 1000000,
-               .sched_deadline = 10000000,
-               .sched_period   = 10000000,
-       };
-       int ret;
-
-       if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
-               return;
-
-       if (fie_disabled)
-               return;
-
-       kworker_fie = kthread_create_worker(0, "cppc_fie");
-       if (IS_ERR(kworker_fie))
-               return;
-
-       ret = sched_setattr_nocheck(kworker_fie->task, &attr);
-       if (ret) {
-               pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
-                       ret);
-               kthread_destroy_worker(kworker_fie);
-               return;
-       }
-
-       /* Register for freq-invariance */
-       topology_set_scale_freq_source(&cppc_sftd, cpu_present_mask);
-}
-
-static void cppc_freq_invariance_exit(void)
-{
-       struct cppc_freq_invariance *cppc_fi;
-       int i;
-
-       if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
-               return;
-
-       if (fie_disabled)
-               return;
-
-       topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, cpu_present_mask);
-
-       for_each_possible_cpu(i) {
-               cppc_fi = &per_cpu(cppc_freq_inv, i);
-               irq_work_sync(&cppc_fi->irq_work);
-       }
-
-       kthread_destroy_worker(kworker_fie);
-       kworker_fie = NULL;
-}
-
-#else
-static inline void
-cppc_freq_invariance_policy_init(struct cpufreq_policy *policy,
-                                struct cppc_cpudata *cpu_data)
-{
-}
-
-static inline void cppc_freq_invariance_init(void)
-{
-}
-
-static inline void cppc_freq_invariance_exit(void)
-{
-}
-#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */
-
 /* Callback function used to retrieve the max frequency from DMI */
 static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
 {
@@ -547,12 +345,9 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
        cpu_data->perf_ctrls.desired_perf =  caps->highest_perf;
 
        ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
-       if (ret) {
+       if (ret)
                pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
                         caps->highest_perf, cpu, ret);
-       } else {
-               cppc_freq_invariance_policy_init(policy, cpu_data);
-       }
 
        return ret;
 }
@@ -565,12 +360,12 @@ static inline u64 get_delta(u64 t1, u64 t0)
        return (u32)t1 - (u32)t0;
 }
 
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
-                                struct cppc_perf_fb_ctrs fb_ctrs_t0,
-                                struct cppc_perf_fb_ctrs fb_ctrs_t1)
+static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
+                                    struct cppc_perf_fb_ctrs fb_ctrs_t0,
+                                    struct cppc_perf_fb_ctrs fb_ctrs_t1)
 {
        u64 delta_reference, delta_delivered;
-       u64 reference_perf;
+       u64 reference_perf, delivered_perf;
 
        reference_perf = fb_ctrs_t0.reference_perf;
 
@@ -579,21 +374,12 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
        delta_delivered = get_delta(fb_ctrs_t1.delivered,
                                    fb_ctrs_t0.delivered);
 
-       /* Check to avoid divide-by zero and invalid delivered_perf */
-       if (!delta_reference || !delta_delivered)
-               return cpu_data->perf_ctrls.desired_perf;
-
-       return (reference_perf * delta_delivered) / delta_reference;
-}
-
-static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
-                                    struct cppc_perf_fb_ctrs fb_ctrs_t0,
-                                    struct cppc_perf_fb_ctrs fb_ctrs_t1)
-{
-       u64 delivered_perf;
-
-       delivered_perf = cppc_perf_from_fbctrs(cpu_data, fb_ctrs_t0,
-                                              fb_ctrs_t1);
+       /* Check to avoid divide-by-zero */
+       if (delta_reference || delta_delivered)
+               delivered_perf = (reference_perf * delta_delivered) /
+                                       delta_reference;
+       else
+               delivered_perf = cpu_data->perf_ctrls.desired_perf;
 
        return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
 }
@@ -718,8 +504,6 @@ static void cppc_check_hisi_workaround(void)
 
 static int __init cppc_cpufreq_init(void)
 {
-       int ret;
-
        if ((acpi_disabled) || !acpi_cpc_valid())
                return -ENODEV;
 
@@ -727,11 +511,7 @@ static int __init cppc_cpufreq_init(void)
 
        cppc_check_hisi_workaround();
 
-       ret = cpufreq_register_driver(&cppc_cpufreq_driver);
-       if (!ret)
-               cppc_freq_invariance_init();
-
-       return ret;
+       return cpufreq_register_driver(&cppc_cpufreq_driver);
 }
 
 static inline void free_cpu_data(void)
@@ -748,7 +528,6 @@ static inline void free_cpu_data(void)
 
 static void __exit cppc_cpufreq_exit(void)
 {
-       cppc_freq_invariance_exit();
        cpufreq_unregister_driver(&cppc_cpufreq_driver);
 
        free_cpu_data();
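
The cpufreq change above folds the perf calculation back into cppc_get_rate_from_fbctrs(). A minimal userspace sketch of that arithmetic, with made-up counter values (this sketch conservatively requires both deltas to be non-zero before dividing):

#include <stdint.h>
#include <stdio.h>

/* delivered perf = reference_perf * delta_delivered / delta_reference;
 * fall back to the last requested level when a delta is zero. */
static uint64_t delivered_perf(uint64_t reference_perf,
                               uint64_t delta_reference,
                               uint64_t delta_delivered,
                               uint64_t desired_perf)
{
        if (!delta_reference || !delta_delivered)
                return desired_perf;

        return reference_perf * delta_delivered / delta_reference;
}

int main(void)
{
        /* Reference counter advanced 1000 ticks while 800 were delivered,
         * so the CPU ran at 80% of reference performance: prints 80. */
        printf("perf = %llu\n",
               (unsigned long long)delivered_perf(100, 1000, 800, 50));
        return 0;
}
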
index 6ab9d9a..39b5b46 100644 (file)
@@ -59,6 +59,7 @@ config DMA_OF
 #devices
 config ALTERA_MSGDMA
        tristate "Altera / Intel mSGDMA Engine"
+       depends on HAS_IOMEM
        select DMA_ENGINE
        help
          Enable support for Altera / Intel mSGDMA controller.
@@ -701,6 +702,7 @@ config XILINX_ZYNQMP_DMA
 
 config XILINX_ZYNQMP_DPDMA
        tristate "Xilinx DPDMA Engine"
+       depends on HAS_IOMEM && OF
        select DMA_ENGINE
        select DMA_VIRTUAL_CHANNELS
        help
index 4ec909e..4ae0579 100644 (file)
@@ -332,6 +332,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
        }
 
        if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
+               err = -EINVAL;
                dev_err(dev, "DPDMAI major version mismatch\n"
                             "Found %u.%u, supported version is %u.%u\n",
                                priv->dpdmai_attr.version.major,
@@ -341,6 +342,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
        }
 
        if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
+               err = -EINVAL;
                dev_err(dev, "DPDMAI minor version mismatch\n"
                             "Found %u.%u, supported version is %u.%u\n",
                                priv->dpdmai_attr.version.major,
@@ -475,6 +477,7 @@ static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
                ppriv->store =
                        dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
                if (!ppriv->store) {
+                       err = -ENOMEM;
                        dev_err(dev, "dpaa2_io_store_create() failed\n");
                        goto err_store;
                }
index 302cba5..d4419bf 100644 (file)
@@ -110,6 +110,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
                pasid = iommu_sva_get_pasid(sva);
                if (pasid == IOMMU_PASID_INVALID) {
                        iommu_sva_unbind_device(sva);
+                       rc = -EINVAL;
                        goto failed;
                }
 
index 2a926be..442d55c 100644 (file)
@@ -168,6 +168,32 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
        return rc;
 }
 
+static void idxd_cleanup_interrupts(struct idxd_device *idxd)
+{
+       struct pci_dev *pdev = idxd->pdev;
+       struct idxd_irq_entry *irq_entry;
+       int i, msixcnt;
+
+       msixcnt = pci_msix_vec_count(pdev);
+       if (msixcnt <= 0)
+               return;
+
+       irq_entry = &idxd->irq_entries[0];
+       free_irq(irq_entry->vector, irq_entry);
+
+       for (i = 1; i < msixcnt; i++) {
+               irq_entry = &idxd->irq_entries[i];
+               if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE))
+                       idxd_device_release_int_handle(idxd, idxd->int_handles[i],
+                                                      IDXD_IRQ_MSIX);
+               free_irq(irq_entry->vector, irq_entry);
+       }
+
+       idxd_mask_error_interrupts(idxd);
+       pci_free_irq_vectors(pdev);
+}
+
 static int idxd_setup_wqs(struct idxd_device *idxd)
 {
        struct device *dev = &idxd->pdev->dev;
@@ -242,6 +268,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
                engine->idxd = idxd;
                device_initialize(&engine->conf_dev);
                engine->conf_dev.parent = &idxd->conf_dev;
+               engine->conf_dev.bus = &dsa_bus_type;
                engine->conf_dev.type = &idxd_engine_device_type;
                rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
                if (rc < 0) {
@@ -303,6 +330,19 @@ static int idxd_setup_groups(struct idxd_device *idxd)
        return rc;
 }
 
+static void idxd_cleanup_internals(struct idxd_device *idxd)
+{
+       int i;
+
+       for (i = 0; i < idxd->max_groups; i++)
+               put_device(&idxd->groups[i]->conf_dev);
+       for (i = 0; i < idxd->max_engines; i++)
+               put_device(&idxd->engines[i]->conf_dev);
+       for (i = 0; i < idxd->max_wqs; i++)
+               put_device(&idxd->wqs[i]->conf_dev);
+       destroy_workqueue(idxd->wq);
+}
+
 static int idxd_setup_internals(struct idxd_device *idxd)
 {
        struct device *dev = &idxd->pdev->dev;
@@ -531,12 +571,12 @@ static int idxd_probe(struct idxd_device *idxd)
                dev_dbg(dev, "Loading RO device config\n");
                rc = idxd_device_load_config(idxd);
                if (rc < 0)
-                       goto err;
+                       goto err_config;
        }
 
        rc = idxd_setup_interrupts(idxd);
        if (rc)
-               goto err;
+               goto err_config;
 
        dev_dbg(dev, "IDXD interrupt setup complete.\n");
 
@@ -549,6 +589,8 @@ static int idxd_probe(struct idxd_device *idxd)
        dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
        return 0;
 
+ err_config:
+       idxd_cleanup_internals(idxd);
  err:
        if (device_pasid_enabled(idxd))
                idxd_disable_system_pasid(idxd);
@@ -556,6 +598,18 @@ static int idxd_probe(struct idxd_device *idxd)
        return rc;
 }
 
+static void idxd_cleanup(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+
+       perfmon_pmu_remove(idxd);
+       idxd_cleanup_interrupts(idxd);
+       idxd_cleanup_internals(idxd);
+       if (device_pasid_enabled(idxd))
+               idxd_disable_system_pasid(idxd);
+       iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+}
+
 static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        struct device *dev = &pdev->dev;
@@ -608,7 +662,7 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        rc = idxd_register_devices(idxd);
        if (rc) {
                dev_err(dev, "IDXD sysfs setup failed\n");
-               goto err;
+               goto err_dev_register;
        }
 
        idxd->state = IDXD_DEV_CONF_READY;
@@ -618,6 +672,8 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        return 0;
 
+ err_dev_register:
+       idxd_cleanup(idxd);
  err:
        pci_iounmap(pdev, idxd->reg_base);
  err_iomap:
@@ -745,12 +801,12 @@ static int __init idxd_init_module(void)
         * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
         * enumerating the device. We can not utilize it.
         */
-       if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
+       if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
                pr_warn("idxd driver failed to load without MOVDIR64B.\n");
                return -ENODEV;
        }
 
-       if (!boot_cpu_has(X86_FEATURE_ENQCMD))
+       if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
                pr_warn("Platform does not have ENQCMD(S) support.\n");
        else
                support_enqcmd = true;
@@ -787,6 +843,7 @@ module_init(idxd_init_module);
 
 static void __exit idxd_exit_module(void)
 {
+       idxd_unregister_driver();
        pci_unregister_driver(&idxd_pci_driver);
        idxd_cdev_remove();
        idxd_unregister_bus_type();
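
The idxd probe path above gains matching cleanup helpers so each failure exit unwinds exactly what succeeded. A small standalone sketch of that goto-ladder pattern (resource names invented for illustration):

#include <stdio.h>

struct res { int up; };

static int setup_internals(struct res *r) { r->up = 1; return 0; }
static void cleanup_internals(struct res *r) { r->up = 0; }
static int setup_interrupts(struct res *r) { (void)r; return -1; /* simulated failure */ }

static int probe(void)
{
        struct res internals, irqs;
        int rc;

        rc = setup_internals(&internals);
        if (rc)
                goto err;

        rc = setup_interrupts(&irqs);
        if (rc)
                goto err_config;        /* unwind only what was set up */

        return 0;

err_config:
        cleanup_internals(&internals);
err:
        return rc;
}

int main(void)
{
        printf("probe: %d\n", probe());        /* prints -1, internals torn down */
        return 0;
}
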
index 0d5c42f..97d9a6f 100644 (file)
@@ -230,7 +230,7 @@ out:
 }
 
 /**
- * ipu_irq_map() - map an IPU interrupt source to an IRQ number
+ * ipu_irq_unmap() - unmap an IPU interrupt source
  * @source:    interrupt source bit position (see ipu_irq_map())
  * @return:    0 or negative error code
  */
index 27c0735..375e7e6 100644 (file)
@@ -131,10 +131,7 @@ static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
 
 static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
 {
-       struct dma_chan *chan = vd->tx.chan;
-       struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
-
-       kfree(c->desc);
+       kfree(container_of(vd, struct mtk_uart_apdma_desc, vd));
 }
 
 static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
@@ -207,14 +204,9 @@ static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
 
 static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
 {
-       struct mtk_uart_apdma_desc *d = c->desc;
-
        mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
        mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
        mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
-
-       list_del(&d->vd.node);
-       vchan_cookie_complete(&d->vd);
 }
 
 static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
@@ -245,9 +237,17 @@ static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
 
        c->rx_status = d->avail_len - cnt;
        mtk_uart_apdma_write(c, VFF_RPT, wg);
+}
 
-       list_del(&d->vd.node);
-       vchan_cookie_complete(&d->vd);
+static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c)
+{
+       struct mtk_uart_apdma_desc *d = c->desc;
+
+       if (d) {
+               list_del(&d->vd.node);
+               vchan_cookie_complete(&d->vd);
+               c->desc = NULL;
+       }
 }
 
 static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
@@ -261,6 +261,7 @@ static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
                mtk_uart_apdma_rx_handler(c);
        else if (c->dir == DMA_MEM_TO_DEV)
                mtk_uart_apdma_tx_handler(c);
+       mtk_uart_apdma_chan_complete_handler(c);
        spin_unlock_irqrestore(&c->vc.lock, flags);
 
        return IRQ_HANDLED;
@@ -348,7 +349,7 @@ static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
                return NULL;
 
        /* Now allocate and setup the descriptor */
-       d = kzalloc(sizeof(*d), GFP_ATOMIC);
+       d = kzalloc(sizeof(*d), GFP_NOWAIT);
        if (!d)
                return NULL;
 
@@ -366,7 +367,7 @@ static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
        unsigned long flags;
 
        spin_lock_irqsave(&c->vc.lock, flags);
-       if (vchan_issue_pending(&c->vc)) {
+       if (vchan_issue_pending(&c->vc) && !c->desc) {
                vd = vchan_next_desc(&c->vc);
                c->desc = to_mtk_uart_apdma_desc(&vd->tx);
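
The mtk-uart-apdma rework frees a descriptor by recovering its containing allocation from the embedded virt_dma_desc rather than a cached c->desc pointer. A self-contained sketch of that container_of() idiom (struct names simplified):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct virt_desc { int cookie; };

struct uart_desc {
        int avail_len;
        struct virt_desc vd;    /* embedded generic descriptor */
};

/* The free callback only sees the embedded member, yet can still free
 * the whole allocation without any back-pointer. */
static void desc_free(struct virt_desc *vd)
{
        free(container_of(vd, struct uart_desc, vd));
}

int main(void)
{
        struct uart_desc *d = calloc(1, sizeof(*d));

        if (d)
                desc_free(&d->vd);      /* frees d itself */
        puts("freed via container_of");
        return 0;
}
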
 
index fd8d2bc..110de8a 100644 (file)
@@ -2694,13 +2694,15 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
        for (i = 0; i < len / period_len; i++) {
                desc = pl330_get_desc(pch);
                if (!desc) {
+                       unsigned long iflags;
+
                        dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
                                __func__, __LINE__);
 
                        if (!first)
                                return NULL;
 
-                       spin_lock_irqsave(&pl330->pool_lock, flags);
+                       spin_lock_irqsave(&pl330->pool_lock, iflags);
 
                        while (!list_empty(&first->node)) {
                                desc = list_entry(first->node.next,
@@ -2710,7 +2712,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
 
                        list_move_tail(&first->node, &pl330->desc_pool);
 
-                       spin_unlock_irqrestore(&pl330->pool_lock, flags);
+                       spin_unlock_irqrestore(&pl330->pool_lock, iflags);
 
                        return NULL;
                }
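
The pl330 fix gives the nested spin_lock_irqsave() its own flags variable so it cannot clobber the IRQ state saved by the outer lock. A toy model of why that matters (lock_irqsave()/unlock_irqrestore() here just stash a fake CPU state):

#include <stdio.h>

static unsigned long irq_state = 0xA5;  /* pretend CPU interrupt state */

static void lock_irqsave(unsigned long *flags)
{
        *flags = irq_state;
        irq_state = 0;                  /* interrupts "disabled" */
}

static void unlock_irqrestore(unsigned long flags)
{
        irq_state = flags;
}

int main(void)
{
        unsigned long flags;

        lock_irqsave(&flags);           /* outer critical section */
        {
                unsigned long iflags;   /* fresh variable, as in the fix */

                lock_irqsave(&iflags);
                unlock_irqrestore(iflags);
        }
        unlock_irqrestore(flags);

        /* Reusing "flags" for the inner pair would restore 0 here
         * instead of the original 0xA5. */
        printf("restored irq state: %#lx\n", irq_state);
        return 0;
}
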
index 365f94e..3f926a6 100644 (file)
@@ -33,6 +33,7 @@ config QCOM_GPI_DMA
 
 config QCOM_HIDMA_MGMT
        tristate "Qualcomm Technologies HIDMA Management support"
+       depends on HAS_IOMEM
        select DMA_ENGINE
        help
          Enable support for the Qualcomm Technologies HIDMA Management.
index f8ffa02..ba46a0a 100644 (file)
@@ -1,5 +1,6 @@
 config SF_PDMA
        tristate "Sifive PDMA controller driver"
+       depends on HAS_IOMEM
        select DMA_ENGINE
        select DMA_VIRTUAL_CHANNELS
        help
index d530c1b..6885b3d 100644 (file)
@@ -1913,7 +1913,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
 
        /* Enable runtime PM and initialize the device. */
        pm_runtime_enable(&pdev->dev);
-       ret = pm_runtime_get_sync(&pdev->dev);
+       ret = pm_runtime_resume_and_get(&pdev->dev);
        if (ret < 0) {
                dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
                return ret;
index 265d7c0..e182739 100644 (file)
@@ -3675,6 +3675,9 @@ static int __init d40_probe(struct platform_device *pdev)
 
        kfree(base->lcla_pool.base_unaligned);
 
+       if (base->lcpa_base)
+               iounmap(base->lcpa_base);
+
        if (base->phy_lcpa)
                release_mem_region(base->phy_lcpa,
                                   base->lcpa_size);
index 36ba8b4..18cbd1e 100644 (file)
@@ -1452,7 +1452,7 @@ static int stm32_mdma_alloc_chan_resources(struct dma_chan *c)
                return -ENOMEM;
        }
 
-       ret = pm_runtime_get_sync(dmadev->ddev.dev);
+       ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
        if (ret < 0)
                return ret;
 
@@ -1718,7 +1718,7 @@ static int stm32_mdma_pm_suspend(struct device *dev)
        u32 ccr, id;
        int ret;
 
-       ret = pm_runtime_get_sync(dev);
+       ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;
 
index 70b29bd..6c70980 100644 (file)
 #define XILINX_DPDMA_CH_VDO                            0x020
 #define XILINX_DPDMA_CH_PYLD_SZ                                0x024
 #define XILINX_DPDMA_CH_DESC_ID                                0x028
+#define XILINX_DPDMA_CH_DESC_ID_MASK                   GENMASK(15, 0)
 
 /* DPDMA descriptor fields */
 #define XILINX_DPDMA_DESC_CONTROL_PREEMBLE             0xa5
@@ -866,7 +867,8 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
         * will be used, but it should be enough.
         */
        list_for_each_entry(sw_desc, &desc->descriptors, node)
-               sw_desc->hw.desc_id = desc->vdesc.tx.cookie;
+               sw_desc->hw.desc_id = desc->vdesc.tx.cookie
+                                   & XILINX_DPDMA_CH_DESC_ID_MASK;
 
        sw_desc = list_first_entry(&desc->descriptors,
                                   struct xilinx_dpdma_sw_desc, node);
@@ -1086,7 +1088,8 @@ static void xilinx_dpdma_chan_vsync_irq(struct  xilinx_dpdma_chan *chan)
        if (!chan->running || !pending)
                goto out;
 
-       desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID);
+       desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID)
+               & XILINX_DPDMA_CH_DESC_ID_MASK;
 
        /* If the retrigger raced with vsync, retry at the next frame. */
        sw_desc = list_first_entry(&pending->descriptors,
@@ -1459,7 +1462,7 @@ static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
  */
 static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
 {
-       dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
+       dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
        dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
 }
 
@@ -1596,6 +1599,26 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
        return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
 }
 
+static void dpdma_hw_init(struct xilinx_dpdma_device *xdev)
+{
+       unsigned int i;
+       void __iomem *reg;
+
+       /* Disable all interrupts */
+       xilinx_dpdma_disable_irq(xdev);
+
+       /* Stop all channels */
+       for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
+               reg = xdev->reg + XILINX_DPDMA_CH_BASE
+                               + XILINX_DPDMA_CH_OFFSET * i;
+               dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
+       }
+
+       /* Clear the interrupt status registers */
+       dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
+       dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
+}
+
 static int xilinx_dpdma_probe(struct platform_device *pdev)
 {
        struct xilinx_dpdma_device *xdev;
@@ -1622,6 +1645,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
        if (IS_ERR(xdev->reg))
                return PTR_ERR(xdev->reg);
 
+       dpdma_hw_init(xdev);
+
        xdev->irq = platform_get_irq(pdev, 0);
        if (xdev->irq < 0) {
                dev_err(xdev->dev, "failed to get platform irq\n");
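
The dpdma change masks descriptor IDs to the 16-bit register field on both the write and the read-back side. A short sketch of why the mask must be applied consistently (values invented):

#include <stdint.h>
#include <stdio.h>

#define DESC_ID_MASK 0xFFFFu    /* GENMASK(15, 0): the field is 16 bits wide */

int main(void)
{
        uint32_t cookie = 0x12345;              /* cookies grow past 16 bits */
        uint32_t desc_id = cookie & DESC_ID_MASK;

        /* Once the cookie exceeds the field width, comparing the raw
         * cookie against the hardware value would spuriously mismatch. */
        printf("cookie %#x -> desc_id %#x (raw compare would %s)\n",
               cookie, desc_id, cookie == desc_id ? "match" : "mismatch");
        return 0;
}
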
index d841956..5fecf5a 100644 (file)
@@ -468,7 +468,7 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
        struct zynqmp_dma_desc_sw *desc;
        int i, ret;
 
-       ret = pm_runtime_get_sync(chan->dev);
+       ret = pm_runtime_resume_and_get(chan->dev);
        if (ret < 0)
                return ret;
 
index e15d484..ea7ca74 100644 (file)
@@ -276,8 +276,7 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
        if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE))
                return 0;
 
-       n = 0;
-       len = CPER_REC_LEN - 1;
+       len = CPER_REC_LEN;
        dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
        if (bank && device)
                n = snprintf(msg, len, "DIMM location: %s %s ", bank, device);
@@ -286,7 +285,6 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
                             "DIMM location: not present. DMI handle: 0x%.4x ",
                             mem->mem_dev_handle);
 
-       msg[n] = '\0';
        return n;
 }
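
The cper change leans on snprintf() semantics: it writes at most the given size, always NUL-terminates within it, and returns the would-be length, so the manual msg[n] = '\0' and the len = CPER_REC_LEN - 1 adjustment were unnecessary. A brief demonstration:

#include <stdio.h>

int main(void)
{
        char msg[8];

        /* At most sizeof(msg) bytes are written, including the NUL;
         * the return value is the length the full string needed. */
        int n = snprintf(msg, sizeof(msg), "DIMM location: %s %s", "A", "1");

        printf("stored \"%s\", full length %d\n", msg, n);
        return 0;
}
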
 
index bb042ab..e901f85 100644 (file)
@@ -98,6 +98,9 @@ u64 __init efi_get_fdt_params(struct efi_memory_map_data *mm)
        BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(name));
        BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(dt_params[0].params));
 
+       if (!fdt)
+               return 0;
+
        for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
                node = fdt_path_offset(fdt, dt_params[i].path);
                if (node < 0)
index 4e81c60..dd95f33 100644 (file)
@@ -103,7 +103,7 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
                return 0;
 
        /* Skip any leading slashes */
-       while (cmdline[i] == L'/' || cmdline[i] == L'\\')
+       while (i < cmdline_len && (cmdline[i] == L'/' || cmdline[i] == L'\\'))
                i++;
 
        while (--result_len > 0 && i < cmdline_len) {
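
The stub loader fix tests the index against cmdline_len before dereferencing, since the buffer is not guaranteed to be NUL-terminated. A minimal sketch of the reordered loop condition:

#include <stdio.h>

/* Test the bound before reading the character; with the old order a
 * buffer consisting entirely of slashes would be overrun. */
static int skip_slashes(const char *s, int len)
{
        int i = 0;

        while (i < len && (s[i] == '/' || s[i] == '\\'))
                i++;
        return i;
}

int main(void)
{
        char cmdline[4] = { '/', '\\', '/', '/' };      /* no terminator */

        printf("skipped %d\n", skip_slashes(cmdline, 4));
        return 0;
}
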
index 5737cb0..0a9aba5 100644 (file)
@@ -67,11 +67,6 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
                return false;
        }
 
-       if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
-               pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
-               return false;
-       }
-
        if (PAGE_SIZE > EFI_PAGE_SIZE &&
            (!PAGE_ALIGNED(in->phys_addr) ||
             !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
index 1cbce59..97e6cae 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/slab.h>
 #include <linux/of_device.h>
 
-#define WCD_PIN_MASK(p) BIT(p - 1)
+#define WCD_PIN_MASK(p) BIT(p)
 #define WCD_REG_DIR_CTL_OFFSET 0x42
 #define WCD_REG_VAL_CTL_OFFSET 0x43
 #define WCD934X_NPINS          5
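
The wcd934x fix drops the off-by-one in the pin mask: pins are numbered from zero, so pin p maps to bit p, and the old BIT(p - 1) shifted pin 0 by -1, which is undefined behaviour. A quick check:

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
        /* pin 0 -> 0x1, pin 1 -> 0x2, ..., pin 4 -> 0x10 */
        for (unsigned int p = 0; p < 5; p++)
                printf("pin %u -> mask %#x\n", p, BIT(p));
        return 0;
}
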
index fad3b91..d39cff4 100644 (file)
@@ -156,16 +156,16 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
                                mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
                break;
        case 1:
-               sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
+               sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
                                mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
                break;
        case 2:
-               sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0,
-                               mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
+               sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+                               mmSDMA2_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
                break;
        case 3:
-               sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0,
-                               mmSDMA3_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
+               sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+                               mmSDMA3_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
                break;
        }
 
@@ -450,7 +450,7 @@ static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd,
                        engine_id, queue_id);
        uint32_t i = 0, reg;
 #undef HQD_N_REGS
-#define HQD_N_REGS (19+6+7+10)
+#define HQD_N_REGS (19+6+7+12)
 
        *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
        if (*dump == NULL)
index 0350205..6819fe5 100644 (file)
@@ -337,7 +337,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
 {
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;
-       unsigned long ras_counter;
 
        if (!fpriv)
                return -EINVAL;
@@ -362,21 +361,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
        if (atomic_read(&ctx->guilty))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
 
-       /*query ue count*/
-       ras_counter = amdgpu_ras_query_error_count(adev, false);
-       /*ras counter is monotonic increasing*/
-       if (ras_counter != ctx->ras_counter_ue) {
-               out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
-               ctx->ras_counter_ue = ras_counter;
-       }
-
-       /*query ce count*/
-       ras_counter = amdgpu_ras_query_error_count(adev, true);
-       if (ras_counter != ctx->ras_counter_ce) {
-               out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
-               ctx->ras_counter_ce = ras_counter;
-       }
-
        mutex_unlock(&mgr->lock);
        return 0;
 }
index 66ddfe4..57ec108 100644 (file)
@@ -3118,7 +3118,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
  */
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
 {
-       if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
+       if (amdgpu_sriov_vf(adev) ||
+           adev->enable_virtual_display ||
+           (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
                return false;
 
        return amdgpu_device_asic_has_dc_support(adev->asic_type);
index 8a1fb8b..c13985f 100644 (file)
@@ -1057,7 +1057,7 @@ int amdgpu_display_gem_fb_init(struct drm_device *dev,
 
        return 0;
 err:
-       drm_err(dev, "Failed to init gem fb: %d\n", ret);
+       drm_dbg_kms(dev, "Failed to init gem fb: %d\n", ret);
        rfb->base.obj[0] = NULL;
        return ret;
 }
@@ -1094,7 +1094,7 @@ int amdgpu_display_gem_fb_verify_and_init(
 
        return 0;
 err:
-       drm_err(dev, "Failed to verify and init gem fb: %d\n", ret);
+       drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
        rfb->base.obj[0] = NULL;
        return ret;
 }
index 8f4a8f8..39b6c6b 100644 (file)
@@ -101,7 +101,8 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
 int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 {
        unsigned char buff[34];
-       int addrptr = 0, size = 0;
+       int addrptr, size;
+       int len;
 
        if (!is_fru_eeprom_supported(adev))
                return 0;
@@ -109,7 +110,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
        /* If algo exists, it means that the i2c_adapter's initialized */
        if (!adev->pm.smu_i2c.algo) {
                DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
-               return 0;
+               return -ENODEV;
        }
 
        /* There's a lot of repetition here. This is due to the FRU having
@@ -128,7 +129,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
        size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
        if (size < 1) {
                DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
        /* Increment the addrptr by the size of the field, and 1 due to the
@@ -138,43 +139,45 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
        size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
        if (size < 1) {
                DRM_ERROR("Failed to read FRU product name, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
+       len = size;
        /* Product name should only be 32 characters. Any more,
         * and something could be wrong. Cap it at 32 to be safe
         */
-       if (size > 32) {
+       if (len >= sizeof(adev->product_name)) {
                DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake");
-               size = 32;
+               len = sizeof(adev->product_name) - 1;
        }
        /* Start at 2 due to buff using fields 0 and 1 for the address */
-       memcpy(adev->product_name, &buff[2], size);
-       adev->product_name[size] = '\0';
+       memcpy(adev->product_name, &buff[2], len);
+       adev->product_name[len] = '\0';
 
        addrptr += size + 1;
        size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
        if (size < 1) {
                DRM_ERROR("Failed to read FRU product number, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
+       len = size;
        /* Product number should only be 16 characters. Any more,
         * and something could be wrong. Cap it at 16 to be safe
         */
-       if (size > 16) {
+       if (len >= sizeof(adev->product_number)) {
                DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
-               size = 16;
+               len = sizeof(adev->product_number) - 1;
        }
-       memcpy(adev->product_number, &buff[2], size);
-       adev->product_number[size] = '\0';
+       memcpy(adev->product_number, &buff[2], len);
+       adev->product_number[len] = '\0';
 
        addrptr += size + 1;
        size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
 
        if (size < 1) {
                DRM_ERROR("Failed to read FRU product version, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
        addrptr += size + 1;
@@ -182,18 +185,19 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 
        if (size < 1) {
                DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
+       len = size;
        /* Serial number should only be 16 characters. Any more,
         * and something could be wrong. Cap it at 16 to be safe
         */
-       if (size > 16) {
+       if (len >= sizeof(adev->serial)) {
                DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
-               size = 16;
+               len = sizeof(adev->serial) - 1;
        }
-       memcpy(adev->serial, &buff[2], size);
-       adev->serial[size] = '\0';
+       memcpy(adev->serial, &buff[2], len);
+       adev->serial[len] = '\0';
 
        return 0;
 }
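
The FRU parsing now caps each copy by the destination buffer's size rather than a hard-coded character count, then NUL-terminates. A standalone sketch of that bounded-copy pattern (field and helper names invented):

#include <stdio.h>
#include <string.h>

struct dev_info { char product_name[32]; };

static void set_name(struct dev_info *d, const char *src)
{
        size_t len = strlen(src);

        /* Leave room for the terminator; sizeof tracks the field if it
         * is ever resized, unlike a literal 32. */
        if (len >= sizeof(d->product_name))
                len = sizeof(d->product_name) - 1;
        memcpy(d->product_name, src, len);
        d->product_name[len] = '\0';
}

int main(void)
{
        struct dev_info d;

        set_name(&d, "an over-long product string that exceeds the field");
        printf("%s\n", d.product_name);
        return 0;
}
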
index 1345f7e..f9434bc 100644 (file)
@@ -100,7 +100,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
                kfree(ubo->metadata);
        }
 
-       kfree(bo);
+       kvfree(bo);
 }
 
 /**
@@ -552,7 +552,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
        BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));
 
        *bo_ptr = NULL;
-       bo = kzalloc(bp->bo_ptr_size, GFP_KERNEL);
+       bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
index 46a5328..60aa99a 100644 (file)
@@ -76,6 +76,7 @@ struct psp_ring
        uint64_t                        ring_mem_mc_addr;
        void                            *ring_mem_handle;
        uint32_t                        ring_size;
+       uint32_t                        ring_wptr;
 };
 
 /* More registers may will be supported */
index 7ce76a6..327b1f8 100644 (file)
 #define mmGC_THROTTLE_CTRL_Sienna_Cichlid              0x2030
 #define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX     0
 
+#define mmRLC_SPARE_INT_0_Sienna_Cichlid               0x4ca5
+#define mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX      1
+
 #define GFX_RLCG_GC_WRITE_OLD  (0x8 << 28)
 #define GFX_RLCG_GC_WRITE      (0x0 << 28)
 #define GFX_RLCG_GC_READ       (0x1 << 28)
@@ -1480,8 +1483,15 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32
                       (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG2) * 4;
        scratch_reg3 = adev->rmmio +
                       (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3) * 4;
-       spare_int = adev->rmmio +
-                   (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
+
+       if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+               spare_int = adev->rmmio +
+                           (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX]
+                            + mmRLC_SPARE_INT_0_Sienna_Cichlid) * 4;
+       } else {
+               spare_int = adev->rmmio +
+                           (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
+       }
 
        grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
        grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
@@ -6861,8 +6871,12 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
        if (ring->use_doorbell) {
                WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
                        (adev->doorbell_index.kiq * 2) << 2);
+               /* If GC has entered CGPG, ringing doorbell > first page doesn't
+                * wake up GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to work around
+                * this issue.
+                */
                WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
-                       (adev->doorbell_index.userqueue_end * 2) << 2);
+                       (adev->doorbell.size - 4));
        }
 
        WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
@@ -7349,9 +7363,15 @@ static int gfx_v10_0_hw_fini(void *handle)
        if (amdgpu_sriov_vf(adev)) {
                gfx_v10_0_cp_gfx_enable(adev, false);
                /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
-               tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
-               tmp &= 0xffffff00;
-               WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+               if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+                       tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
+                       tmp &= 0xffffff00;
+                       WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
+               } else {
+                       tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
+                       tmp &= 0xffffff00;
+                       WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+               }
 
                return 0;
        }
index 516467e..c09225d 100644 (file)
@@ -3673,8 +3673,12 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
        if (ring->use_doorbell) {
                WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
                                        (adev->doorbell_index.kiq * 2) << 2);
+               /* If GC has entered CGPG, ringing doorbell > first page doesn't
+                * wake up GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to work around
+                * this issue.
+                */
                WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
-                                       (adev->doorbell_index.userqueue_end * 2) << 2);
+                                       (adev->doorbell.size - 4));
        }
 
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
index de5abce..85967a5 100644 (file)
@@ -172,6 +172,8 @@ static int jpeg_v2_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
              RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
                jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
index 938ef4c..46096ad 100644 (file)
@@ -187,14 +187,14 @@ static int jpeg_v2_5_hw_init(void *handle)
 static int jpeg_v2_5_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring;
        int i;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
                if (adev->jpeg.harvest_config & (1 << i))
                        continue;
 
-               ring = &adev->jpeg.inst[i].ring_dec;
                if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
                      RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
                        jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
index 94be353..bd77794 100644 (file)
@@ -159,9 +159,9 @@ static int jpeg_v3_0_hw_init(void *handle)
 static int jpeg_v3_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring;
 
-       ring = &adev->jpeg.inst->ring_dec;
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
              RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
                jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
index 589410c..02bba1f 100644 (file)
@@ -720,7 +720,7 @@ static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
        struct amdgpu_device *adev = psp->adev;
 
        if (amdgpu_sriov_vf(adev))
-               data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+               data = psp->km_ring.ring_wptr;
        else
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
 
@@ -734,6 +734,7 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
        if (amdgpu_sriov_vf(adev)) {
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+               psp->km_ring.ring_wptr = value;
        } else
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
 }
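
The psp changes stop reading the write pointer back from the C2PMSG register under SR-IOV and instead return a driver-side shadow updated on every write. A toy model of that shadow-register pattern (struct layout simplified):

#include <stdint.h>
#include <stdio.h>

struct ring {
        uint32_t hw_reg;        /* stands in for MP0_SMN_C2PMSG_102 */
        uint32_t ring_wptr;     /* driver-side shadow */
        int sriov;              /* readback not trusted on this path */
};

static void set_wptr(struct ring *r, uint32_t v)
{
        r->hw_reg = v;
        r->ring_wptr = v;       /* keep the shadow in sync on every write */
}

static uint32_t get_wptr(const struct ring *r)
{
        return r->sriov ? r->ring_wptr : r->hw_reg;
}

int main(void)
{
        struct ring r = { .sriov = 1 };

        set_wptr(&r, 64);
        printf("wptr = %u\n", get_wptr(&r));    /* prints 64 */
        return 0;
}
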
index f2e725f..908664a 100644 (file)
@@ -379,7 +379,7 @@ static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp)
        struct amdgpu_device *adev = psp->adev;
 
        if (amdgpu_sriov_vf(adev))
-               data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+               data = psp->km_ring.ring_wptr;
        else
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
        return data;
@@ -394,6 +394,7 @@ static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value)
                /* send interrupt to PSP for SRIOV ring write pointer update */
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
                        GFX_CTRL_CMD_ID_CONSUME_CMD);
+               psp->km_ring.ring_wptr = value;
        } else
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
 }
index 2bab9c7..cf3803f 100644 (file)
@@ -357,6 +357,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
        dma_fence_put(fence);
+       amdgpu_bo_unpin(bo);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);
        return r;
index 0c1beef..27b1ced 100644 (file)
@@ -231,9 +231,13 @@ static int vcn_v1_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
-               RREG32_SOC15(VCN, 0, mmUVD_STATUS))
+               (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+                RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
                vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+       }
 
        return 0;
 }
index 116b964..8af567c 100644 (file)
@@ -262,6 +262,8 @@ static int vcn_v2_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
            (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
              RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
index 948813d..888b17d 100644 (file)
@@ -321,6 +321,8 @@ static int vcn_v2_5_hw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
index 14470da..3b23de9 100644 (file)
@@ -372,15 +372,14 @@ done:
 static int vcn_v3_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring;
        int i;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
 
-               ring = &adev->vcn.inst[i].ring_dec;
-
                if (!amdgpu_sriov_vf(adev)) {
                        if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
                                        (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
index 389eff9..652cc1a 100644 (file)
@@ -925,7 +925,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }
 
-       adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
+       if (!adev->dm.dc->ctx->dmub_srv)
+               adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
@@ -1954,7 +1955,6 @@ static int dm_suspend(void *handle)
 
        amdgpu_dm_irq_suspend(adev);
 
-
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
 
        return 0;
@@ -5500,7 +5500,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        struct drm_display_mode saved_mode;
        struct drm_display_mode *freesync_mode = NULL;
        bool native_mode_found = false;
-       bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+       bool recalculate_timing = false;
+       bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
        int mode_refresh;
        int preferred_refresh = 0;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -5563,7 +5564,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                 */
                DRM_DEBUG_DRIVER("No preferred mode found\n");
        } else {
-               recalculate_timing |= amdgpu_freesync_vid_mode &&
+               recalculate_timing = amdgpu_freesync_vid_mode &&
                                 is_freesync_video_mode(&mode, aconnector);
                if (recalculate_timing) {
                        freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
@@ -5571,11 +5572,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                        mode = *freesync_mode;
                } else {
                        decide_crtc_timing_for_drm_display_mode(
-                               &mode, preferred_mode,
-                               dm_state ? (dm_state->scaling != RMX_OFF) : false);
-               }
+                               &mode, preferred_mode, scale);
 
-               preferred_refresh = drm_mode_vrefresh(preferred_mode);
+                       preferred_refresh = drm_mode_vrefresh(preferred_mode);
+               }
        }
 
        if (recalculate_timing)
@@ -5587,7 +5587,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        * If scaling is enabled and refresh rate didn't change
        * we copy the vic and polarities of the old timings
        */
-       if (!recalculate_timing || mode_refresh != preferred_refresh)
+       if (!scale || mode_refresh != preferred_refresh)
                fill_stream_properties_from_drm_display_mode(
                        stream, &mode, &aconnector->base, con_state, NULL,
                        requested_bpc);
@@ -9854,7 +9854,7 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
 
        if (cursor_scale_w != primary_scale_w ||
            cursor_scale_h != primary_scale_h) {
-               DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
+               drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
                return -EINVAL;
        }
 
@@ -9891,7 +9891,7 @@ static int validate_overlay(struct drm_atomic_state *state)
        int i;
        struct drm_plane *plane;
        struct drm_plane_state *old_plane_state, *new_plane_state;
-       struct drm_plane_state *primary_state, *overlay_state = NULL;
+       struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
 
        /* Check if primary plane is contained inside overlay */
        for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
@@ -9921,6 +9921,14 @@ static int validate_overlay(struct drm_atomic_state *state)
        if (!primary_state->crtc)
                return 0;
 
+       /* check if cursor plane is enabled */
+       cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
+       if (IS_ERR(cursor_state))
+               return PTR_ERR(cursor_state);
+
+       if (drm_atomic_plane_disabling(plane->state, cursor_state))
+               return 0;
+
        /* Perform the bounds check to ensure the overlay plane covers the primary */
        if (primary_state->crtc_x < overlay_state->crtc_x ||
            primary_state->crtc_y < overlay_state->crtc_y ||
index 527e56c..8357aa3 100644 (file)
@@ -3236,7 +3236,7 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
        voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
        dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
 
-       if (voltage_supported && dummy_pstate_supported) {
+       if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
                context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
                goto restore_dml_state;
        }
index f5fe540..27cf227 100644 (file)
@@ -810,6 +810,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                break;
        case AMD_DPM_FORCED_LEVEL_MANUAL:
                data->fine_grain_enabled = 1;
+               break;
        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
        default:
                break;
index ac13042..0eaf86b 100644 (file)
@@ -2925,6 +2925,8 @@ static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
 
 static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
 {
+       struct smu_table_context *table_context = &smu->smu_table;
+       PPTable_t *smc_pptable = table_context->driver_pptable;
        struct amdgpu_device *adev = smu->adev;
        uint32_t param = 0;
 
@@ -2932,6 +2934,13 @@ static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
        if (adev->asic_type == CHIP_NAVI12)
                return 0;
 
+       /*
+        * Skip the MGpuFanBoost setting for those ASICs
+        * which do not support it
+        */
+       if (!smc_pptable->MGpuFanBoostLimitRpm)
+               return 0;
+
        /* Workaround for WS SKU */
        if (adev->pdev->device == 0x7312 &&
            adev->pdev->revision == 0)
index d2fd44b..b124a5e 100644 (file)
@@ -3027,6 +3027,16 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
 
 static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
 {
+       struct smu_table_context *table_context = &smu->smu_table;
+       PPTable_t *smc_pptable = table_context->driver_pptable;
+
+       /*
+        * Skip the MGpuFanBoost setting for those ASICs
+        * which do not support it
+        */
+       if (!smc_pptable->MGpuFanBoostLimitRpm)
+               return 0;
+
        return smu_cmn_send_smc_msg_with_param(smu,
                                               SMU_MSG_SetMGpuFanBoostLimitRpm,
                                               0,
index f2d46b7..232abbb 100644 (file)
@@ -314,9 +314,10 @@ int drm_master_open(struct drm_file *file_priv)
 void drm_master_release(struct drm_file *file_priv)
 {
        struct drm_device *dev = file_priv->minor->dev;
-       struct drm_master *master = file_priv->master;
+       struct drm_master *master;
 
        mutex_lock(&dev->master_mutex);
+       master = file_priv->master;
        if (file_priv->magic)
                idr_remove(&file_priv->master->magic_map, file_priv->magic);
 
index d273d1a..495a476 100644 (file)
@@ -118,17 +118,18 @@ int drm_getunique(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
 {
        struct drm_unique *u = data;
-       struct drm_master *master = file_priv->master;
+       struct drm_master *master;
 
-       mutex_lock(&master->dev->master_mutex);
+       mutex_lock(&dev->master_mutex);
+       master = file_priv->master;
        if (u->unique_len >= master->unique_len) {
                if (copy_to_user(u->unique, master->unique, master->unique_len)) {
-                       mutex_unlock(&master->dev->master_mutex);
+                       mutex_unlock(&dev->master_mutex);
                        return -EFAULT;
                }
        }
        u->unique_len = master->unique_len;
-       mutex_unlock(&master->dev->master_mutex);
+       mutex_unlock(&dev->master_mutex);
 
        return 0;
 }
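
Both drm fixes read file_priv->master only after taking dev->master_mutex, since a racing SET_MASTER/DROP_MASTER can swap the pointer. A small pthread sketch of snapshotting a shared pointer under its lock (types reduced to the essentials):

#include <pthread.h>
#include <stdio.h>

struct master { int unique_len; };
struct file_priv { struct master *master; };

static pthread_mutex_t master_mutex = PTHREAD_MUTEX_INITIALIZER;

static int get_unique_len(struct file_priv *fp)
{
        struct master *m;
        int len;

        pthread_mutex_lock(&master_mutex);
        m = fp->master;         /* snapshot the pointer under the lock */
        len = m->unique_len;
        pthread_mutex_unlock(&master_mutex);
        return len;
}

int main(void)
{
        struct master m = { .unique_len = 7 };
        struct file_priv fp = { .master = &m };

        printf("unique_len = %d\n", get_unique_len(&fp));
        return 0;
}
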
index 93f4d05..1e1cb24 100644 (file)
@@ -20,7 +20,6 @@ config DRM_I915
        select INPUT if ACPI
        select ACPI_VIDEO if ACPI
        select ACPI_BUTTON if ACPI
-       select IO_MAPPING
        select SYNC_FILE
        select IOSF_MBI
        select CRC32
index 02a003f..50cae01 100644 (file)
@@ -128,49 +128,13 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
        return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
 }
 
-/**
- * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
- * @intel_dp: Intel DP struct
- *
- * Read the LTTPR common and DPRX capabilities and switch to non-transparent
- * link training mode if any is detected and read the PHY capabilities for all
- * detected LTTPRs. In case of an LTTPR detection error or if the number of
- * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
- * transparent mode link training mode.
- *
- * Returns:
- *   >0  if LTTPRs were detected and the non-transparent LT mode was set. The
- *       DPRX capabilities are read out.
- *    0  if no LTTPRs or more than 8 LTTPRs were detected or in case of a
- *       detection failure and the transparent LT mode was set. The DPRX
- *       capabilities are read out.
- *   <0  Reading out the DPRX capabilities failed.
- */
-int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
+static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
 {
        int lttpr_count;
-       bool ret;
        int i;
 
-       ret = intel_dp_read_lttpr_common_caps(intel_dp);
-
-       /* The DPTX shall read the DPRX caps after LTTPR detection. */
-       if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
-               intel_dp_reset_lttpr_common_caps(intel_dp);
-               return -EIO;
-       }
-
-       if (!ret)
-               return 0;
-
-       /*
-        * The 0xF0000-0xF02FF range is only valid if the DPCD revision is
-        * at least 1.4.
-        */
-       if (intel_dp->dpcd[DP_DPCD_REV] < 0x14) {
-               intel_dp_reset_lttpr_common_caps(intel_dp);
+       if (!intel_dp_read_lttpr_common_caps(intel_dp))
                return 0;
-       }
 
        lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
        /*
@@ -211,6 +175,37 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
 
        return lttpr_count;
 }
+
+/**
+ * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
+ * @intel_dp: Intel DP struct
+ *
+ * Read the LTTPR common and DPRX capabilities and switch to non-transparent
+ * link training mode if any is detected and read the PHY capabilities for all
+ * detected LTTPRs. In case of an LTTPR detection error or if the number of
+ * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
+ * transparent mode link training mode.
+ *
+ * Returns:
+ *   >0  if LTTPRs were detected and the non-transparent LT mode was set. The
+ *       DPRX capabilities are read out.
+ *    0  if no LTTPRs or more than 8 LTTPRs were detected or in case of a
+ *       detection failure and the transparent LT mode was set. The DPRX
+ *       capabilities are read out.
+ *   <0  Reading out the DPRX capabilities failed.
+ */
+int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
+{
+       int lttpr_count = intel_dp_init_lttpr(intel_dp);
+
+       /* The DPTX shall read the DPRX caps after LTTPR detection. */
+       if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
+               intel_dp_reset_lttpr_common_caps(intel_dp);
+               return -EIO;
+       }
+
+       return lttpr_count;
+}
 EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);
 
 static u8 dp_voltage_max(u8 preemph)
index f6fe5cb..8598a1c 100644 (file)
@@ -367,10 +367,11 @@ retry:
                goto err_unpin;
 
        /* Finally, remap it using the new GTT offset */
-       ret = io_mapping_map_user(&ggtt->iomap, area, area->vm_start +
-                       (vma->ggtt_view.partial.offset << PAGE_SHIFT),
-                       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
-                       min_t(u64, vma->size, area->vm_end - area->vm_start));
+       ret = remap_io_mapping(area,
+                              area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
+                              (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+                              min_t(u64, vma->size, area->vm_end - area->vm_start),
+                              &ggtt->iomap);
        if (ret)
                goto err_fence;
 
index 9ec9277..69e43bf 100644 (file)
@@ -1905,6 +1905,9 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);
 
 /* i915_mm.c */
+int remap_io_mapping(struct vm_area_struct *vma,
+                    unsigned long addr, unsigned long pfn, unsigned long size,
+                    struct io_mapping *iomap);
 int remap_io_sg(struct vm_area_struct *vma,
                unsigned long addr, unsigned long size,
                struct scatterlist *sgl, resource_size_t iobase);
index 9a777b0..666808c 100644 (file)
@@ -37,6 +37,17 @@ struct remap_pfn {
        resource_size_t iobase;
 };
 
+static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
+{
+       struct remap_pfn *r = data;
+
+       /* Special PTEs are not associated with any struct page */
+       set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
+       r->pfn++;
+
+       return 0;
+}
+
 #define use_dma(io) ((io) != -1)
 
 static inline unsigned long sgt_pfn(const struct remap_pfn *r)
@@ -66,7 +77,40 @@ static int remap_sg(pte_t *pte, unsigned long addr, void *data)
        return 0;
 }
 
+/**
+ * remap_io_mapping - remap an IO mapping to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @pfn: physical address of kernel memory
+ * @size: size of map area
+ * @iomap: the source io_mapping
+ *
+ * Note: this is only safe if the mm semaphore is held when called.
+ */
+int remap_io_mapping(struct vm_area_struct *vma,
+                    unsigned long addr, unsigned long pfn, unsigned long size,
+                    struct io_mapping *iomap)
+{
+       struct remap_pfn r;
+       int err;
+
 #define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+       GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
+
+       /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+       r.mm = vma->vm_mm;
+       r.pfn = pfn;
+       r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
+                         (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));
+
+       err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
+       if (unlikely(err)) {
+               zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
+               return err;
+       }
+
+       return 0;
+}
 
 /**
  * remap_io_sg - remap an IO mapping to userspace
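remap_io_mapping() is reinstated on top of apply_to_page_range(): each PTE is written as a special PTE with caching bits taken from the io_mapping and the remaining protection bits from the vma, and on failure the PTEs already written are zapped using the r.pfn progress counter. A hedged usage sketch mirroring the i915_gem_mman.c fault-path hunk above (aperture_pfn and size are illustrative names):

        /* caller holds the mm semaphore; the vma carries EXPECTED_FLAGS */
        err = remap_io_mapping(area, area->vm_start, aperture_pfn, size,
                               &ggtt->iomap);
        if (err)
                goto err_fence; /* partially written PTEs were already zapped */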
index ee8e753..eae0abd 100644 (file)
@@ -1592,8 +1592,8 @@ static int live_breadcrumbs_smoketest(void *arg)
 
        for (n = 0; n < smoke[0].ncontexts; n++) {
                smoke[0].contexts[n] = live_context(i915, file);
-               if (!smoke[0].contexts[n]) {
-                       ret = -ENOMEM;
+               if (IS_ERR(smoke[0].contexts[n])) {
+                       ret = PTR_ERR(smoke[0].contexts[n]);
                        goto out_contexts;
                }
        }
index b3fd350..5275b27 100644 (file)
@@ -577,7 +577,7 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
         * porches and sync.
         */
        /* (ps/s) / (pixels/s) = ps/pixels */
-       pclk = DIV_ROUND_UP_ULL(1000000000000, mode->clock);
+       pclk = DIV_ROUND_UP_ULL(1000000000000, (mode->clock * 1000));
        dev_dbg(d->dev, "picoseconds between two pixels: %llu\n",
                pclk);
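The fix scales the divisor because mode->clock is expressed in kHz (the DRM convention) while the numerator is picoseconds per second, so the division only yields picoseconds per pixel once the clock is converted to Hz. Worked example for a 148500 kHz (148.5 MHz) pixel clock:

        pclk = DIV_ROUND_UP_ULL(1000000000000, 148500 * 1000);
        /* = DIV_ROUND_UP_ULL(10^12, 148500000) = 6735 ps per pixel */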
 
index 453d8b4..07fcd12 100644 (file)
@@ -485,11 +485,12 @@ static int meson_probe_remote(struct platform_device *pdev,
 static void meson_drv_shutdown(struct platform_device *pdev)
 {
        struct meson_drm *priv = dev_get_drvdata(&pdev->dev);
-       struct drm_device *drm = priv->drm;
 
-       DRM_DEBUG_DRIVER("\n");
-       drm_kms_helper_poll_fini(drm);
-       drm_atomic_helper_shutdown(drm);
+       if (!priv)
+               return;
+
+       drm_kms_helper_poll_fini(priv->drm);
+       drm_atomic_helper_shutdown(priv->drm);
 }
 
 static int meson_drv_probe(struct platform_device *pdev)
index b4d8e1b..f6c1b62 100644 (file)
@@ -157,7 +157,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
         * GPU registers so we need to add 0x1a800 to the register value on A630
         * to get the right value from PM4.
         */
-       get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+       get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
                rbmemptr_stats(ring, index, alwayson_start));
 
        /* Invalidate CCU depth and color */
@@ -187,7 +187,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 
        get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
                rbmemptr_stats(ring, index, cpcycles_end));
-       get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+       get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
                rbmemptr_stats(ring, index, alwayson_end));
 
        /* Write the fence to the scratch register */
@@ -206,8 +206,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
        OUT_RING(ring, submit->seqno);
 
        trace_msm_gpu_submit_flush(submit,
-               gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
-                       REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));
+               gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+                       REG_A6XX_CP_ALWAYS_ON_COUNTER_HI));
 
        a6xx_flush(gpu, ring);
 }
@@ -462,6 +462,113 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
        gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
 }
 
+/* For a615, a616, a618, a619, a630, a640 and a680 */
+static const u32 a6xx_protect[] = {
+       A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+       A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+       A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+       A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+       A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+       A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+       A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+       A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+       A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+       A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+       A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+       A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+       A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+       A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+       A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+       A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+       A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+       A6XX_PROTECT_NORDWR(0x09e70, 0x0001),
+       A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+       A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+       A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+       A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+       A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+       A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+       A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+       A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+       A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x11c00, 0x0000), /* note: infinite range */
+};
+
+/* These are for a620 and a650 */
+static const u32 a650_protect[] = {
+       A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+       A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+       A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+       A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+       A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+       A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+       A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+       A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+       A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+       A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+       A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+       A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+       A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+       A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+       A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+       A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+       A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
+       A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+       A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
+       A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+       A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+       A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+       A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+       A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+       A6XX_PROTECT_NORDWR(0x0b608, 0x0007),
+       A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+       A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+       A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+       A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x1a800, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
+       A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
+       A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
+       A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
+};
+
+static void a6xx_set_cp_protect(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       const u32 *regs = a6xx_protect;
+       unsigned i, count = ARRAY_SIZE(a6xx_protect), count_max = 32;
+
+       BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
+       BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
+
+       if (adreno_is_a650(adreno_gpu)) {
+               regs = a650_protect;
+               count = ARRAY_SIZE(a650_protect);
+               count_max = 48;
+       }
+
+       /*
+        * Enable access protection to privileged registers, fault on an
+        * access-protect violation, and make the last span protect from its
+        * start address all the way to the end of the register address space.
+        */
+       gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, BIT(0) | BIT(1) | BIT(3));
+
+       for (i = 0; i < count - 1; i++)
+               gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]);
+       /* program the last CP_PROTECT register with the "infinite" range entry */
+       gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);
+}
+
 static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -489,7 +596,7 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
                rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
        gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
        gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
-               uavflagprd_inv >> 4 | lower_bit << 1);
+               uavflagprd_inv << 4 | lower_bit << 1);
        gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21);
 }
 
@@ -776,41 +883,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
        }
 
        /* Protect registers from the CP */
-       gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
-
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
-               A6XX_PROTECT_RDONLY(0x600, 0x51));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
-               A6XX_PROTECT_RDONLY(0xfc00, 0x3));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
-               A6XX_PROTECT_RDONLY(0x0, 0x4f9));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
-               A6XX_PROTECT_RDONLY(0x501, 0xa));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
-               A6XX_PROTECT_RDONLY(0x511, 0x44));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
-               A6XX_PROTECT_RW(0xbe20, 0x11f3));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
-                       A6XX_PROTECT_RDONLY(0x980, 0x4));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
+       a6xx_set_cp_protect(gpu);
 
        /* Enable expanded apriv for targets that support it */
        if (gpu->hw_apriv) {
@@ -1211,7 +1284,7 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
        if (ret)
                return ret;
 
-       if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
+       if (a6xx_gpu->shadow_bo)
                for (i = 0; i < gpu->nr_rings; i++)
                        a6xx_gpu->shadow[i] = 0;
 
index ce0610c..bb544df 100644 (file)
@@ -44,7 +44,7 @@ struct a6xx_gpu {
  * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
  * registers starting at _reg.
  */
-#define A6XX_PROTECT_RW(_reg, _len) \
+#define A6XX_PROTECT_NORDWR(_reg, _len) \
        ((1 << 31) | \
        (((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
 
index 34bc935..6577788 100644 (file)
@@ -432,6 +432,7 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
        pll_freq += div_u64(tmp64, multiplier);
 
        vco_rate = pll_freq;
+       pll_10nm->vco_current_rate = vco_rate;
 
        DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
            pll_10nm->phy->id, (unsigned long)vco_rate, dec, frac);
index e76ce40..6f96fba 100644 (file)
@@ -460,6 +460,7 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
        pll_freq += div_u64(tmp64, multiplier);
 
        vco_rate = pll_freq;
+       pll_7nm->vco_current_rate = vco_rate;
 
        DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
            pll_7nm->phy->id, (unsigned long)vco_rate, dec, frac);
index 56df86e..369d91e 100644 (file)
@@ -1241,6 +1241,13 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 
                to_msm_bo(obj)->vram_node = &vma->node;
 
+               /*
+                * Call chain get_pages() -> update_inactive() tries to
+                * access msm_obj->mm_list, but it is not initialized yet.
+                * To avoid a NULL pointer dereference, initialize mm_list
+                * to be empty.
+                */
+               INIT_LIST_HEAD(&msm_obj->mm_list);
+
                msm_gem_lock(obj);
                pages = get_pages(obj);
                msm_gem_unlock(obj);
index dfa9fdb..06bb24d 100644 (file)
@@ -286,7 +286,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
        if (rdev->uvd.vcpu_bo == NULL)
                return -EINVAL;
 
-       memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+       memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
 
        size = radeon_bo_size(rdev->uvd.vcpu_bo);
        size -= rdev->uvd_fw->size;
@@ -294,7 +294,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
        ptr = rdev->uvd.cpu_addr;
        ptr += rdev->uvd_fw->size;
 
-       memset(ptr, 0, size);
+       memset_io((void __iomem *)ptr, 0, size);
 
        return 0;
 }
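The switch to memcpy_toio()/memset_io() matters because rdev->uvd.cpu_addr maps VRAM as __iomem memory, and plain memcpy()/memset() on such mappings is undefined on some architectures. The general pattern (a sketch; fw_data, fw_len and remaining are illustrative names):

        void __iomem *vram = (void __iomem *)rdev->uvd.cpu_addr;

        memcpy_toio(vram, fw_data, fw_len);             /* not memcpy() */
        memset_io(vram + fw_len, 0, remaining);         /* not memset() */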
index bbdfd5e..f75fb15 100644 (file)
@@ -209,7 +209,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
                goto err_disable_clk_tmds;
        }
 
-       ret = sun8i_hdmi_phy_probe(hdmi, phy_node);
+       ret = sun8i_hdmi_phy_get(hdmi, phy_node);
        of_node_put(phy_node);
        if (ret) {
                dev_err(dev, "Couldn't get the HDMI PHY\n");
@@ -242,7 +242,6 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
 
 cleanup_encoder:
        drm_encoder_cleanup(encoder);
-       sun8i_hdmi_phy_remove(hdmi);
 err_disable_clk_tmds:
        clk_disable_unprepare(hdmi->clk_tmds);
 err_assert_ctrl_reset:
@@ -263,7 +262,6 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
        struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev);
 
        dw_hdmi_unbind(hdmi->hdmi);
-       sun8i_hdmi_phy_remove(hdmi);
        clk_disable_unprepare(hdmi->clk_tmds);
        reset_control_assert(hdmi->rst_ctrl);
        gpiod_set_value(hdmi->ddc_en, 0);
@@ -320,7 +318,32 @@ static struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
                .of_match_table = sun8i_dw_hdmi_dt_ids,
        },
 };
-module_platform_driver(sun8i_dw_hdmi_pltfm_driver);
+
+static int __init sun8i_dw_hdmi_init(void)
+{
+       int ret;
+
+       ret = platform_driver_register(&sun8i_dw_hdmi_pltfm_driver);
+       if (ret)
+               return ret;
+
+       ret = platform_driver_register(&sun8i_hdmi_phy_driver);
+       if (ret) {
+               platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
+               return ret;
+       }
+
+       return ret;
+}
+
+static void __exit sun8i_dw_hdmi_exit(void)
+{
+       platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
+       platform_driver_unregister(&sun8i_hdmi_phy_driver);
+}
+
+module_init(sun8i_dw_hdmi_init);
+module_exit(sun8i_dw_hdmi_exit);
 
 MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
 MODULE_DESCRIPTION("Allwinner DW HDMI bridge");
index d4b55af..74f6ed0 100644 (file)
@@ -195,14 +195,15 @@ struct sun8i_dw_hdmi {
        struct gpio_desc                *ddc_en;
 };
 
+extern struct platform_driver sun8i_hdmi_phy_driver;
+
 static inline struct sun8i_dw_hdmi *
 encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder)
 {
        return container_of(encoder, struct sun8i_dw_hdmi, encoder);
 }
 
-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
+int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
 
 void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
 void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
index 9994edf..c923970 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <linux/delay.h>
 #include <linux/of_address.h>
+#include <linux/of_platform.h>
 
 #include "sun8i_dw_hdmi.h"
 
@@ -597,10 +598,30 @@ static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
        { /* sentinel */ }
 };
 
-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
+int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
+{
+       struct platform_device *pdev = of_find_device_by_node(node);
+       struct sun8i_hdmi_phy *phy;
+
+       if (!pdev)
+               return -EPROBE_DEFER;
+
+       phy = platform_get_drvdata(pdev);
+       if (!phy)
+               return -EPROBE_DEFER;
+
+       hdmi->phy = phy;
+
+       put_device(&pdev->dev);
+
+       return 0;
+}
+
+static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
 {
        const struct of_device_id *match;
-       struct device *dev = hdmi->dev;
+       struct device *dev = &pdev->dev;
+       struct device_node *node = dev->of_node;
        struct sun8i_hdmi_phy *phy;
        struct resource res;
        void __iomem *regs;
@@ -704,7 +725,7 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
                clk_prepare_enable(phy->clk_phy);
        }
 
-       hdmi->phy = phy;
+       platform_set_drvdata(pdev, phy);
 
        return 0;
 
@@ -728,9 +749,9 @@ err_put_clk_bus:
        return ret;
 }
 
-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
+static int sun8i_hdmi_phy_remove(struct platform_device *pdev)
 {
-       struct sun8i_hdmi_phy *phy = hdmi->phy;
+       struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev);
 
        clk_disable_unprepare(phy->clk_mod);
        clk_disable_unprepare(phy->clk_bus);
@@ -744,4 +765,14 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
        clk_put(phy->clk_pll1);
        clk_put(phy->clk_mod);
        clk_put(phy->clk_bus);
+       return 0;
 }
+
+struct platform_driver sun8i_hdmi_phy_driver = {
+       .probe  = sun8i_hdmi_phy_probe,
+       .remove = sun8i_hdmi_phy_remove,
+       .driver = {
+               .name = "sun8i-hdmi-phy",
+               .of_match_table = sun8i_hdmi_phy_of_table,
+       },
+};
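With the PHY split into its own platform driver (registered alongside the HDMI driver in sun8i_dw_hdmi_init() so the pair load and unload together), consumers resolve the PHY at bind time and defer until its driver has bound. The lookup-or-defer pattern around sun8i_hdmi_phy_get(), in sketch form (the "phy" property name is illustrative; the real binding may differ):

        struct device_node *phy_node = of_parse_phandle(dev->of_node, "phy", 0);

        ret = sun8i_hdmi_phy_get(hdmi, phy_node);
        of_node_put(phy_node);
        if (ret)
                return ret;     /* -EPROBE_DEFER until the PHY driver binds */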
index 87df251..0cb8680 100644 (file)
@@ -25,7 +25,7 @@
 #include "trace.h"
 
 /* XXX move to include/uapi/drm/drm_fourcc.h? */
-#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT(22)
+#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT_ULL(22)
 
 struct reset_control;
 
index 79bff8b..bfae8a0 100644 (file)
@@ -510,7 +510,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
         * dGPU sector layout.
         */
        if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
-               base |= BIT(39);
+               base |= BIT_ULL(39);
 #endif
 
        tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
index 7b88261..0ea320c 100644 (file)
@@ -3125,21 +3125,21 @@ static int tegra_sor_init(struct host1x_client *client)
                if (err < 0) {
                        dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
                                err);
-                       return err;
+                       goto rpm_put;
                }
 
                err = reset_control_assert(sor->rst);
                if (err < 0) {
                        dev_err(sor->dev, "failed to assert SOR reset: %d\n",
                                err);
-                       return err;
+                       goto rpm_put;
                }
        }
 
        err = clk_prepare_enable(sor->clk);
        if (err < 0) {
                dev_err(sor->dev, "failed to enable clock: %d\n", err);
-               return err;
+               goto rpm_put;
        }
 
        usleep_range(1000, 3000);
@@ -3150,7 +3150,7 @@ static int tegra_sor_init(struct host1x_client *client)
                        dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
                                err);
                        clk_disable_unprepare(sor->clk);
-                       return err;
+                       goto rpm_put;
                }
 
                reset_control_release(sor->rst);
@@ -3171,6 +3171,12 @@ static int tegra_sor_init(struct host1x_client *client)
        }
 
        return 0;
+
+rpm_put:
+       if (sor->rst)
+               pm_runtime_put(sor->dev);
+
+       return err;
 }
 
 static int tegra_sor_exit(struct host1x_client *client)
@@ -3739,12 +3745,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
                if (!sor->aux)
                        return -EPROBE_DEFER;
 
-               if (get_device(&sor->aux->ddc.dev)) {
-                       if (try_module_get(sor->aux->ddc.owner))
-                               sor->output.ddc = &sor->aux->ddc;
-                       else
-                               put_device(&sor->aux->ddc.dev);
-               }
+               if (get_device(sor->aux->dev))
+                       sor->output.ddc = &sor->aux->ddc;
        }
 
        if (!sor->aux) {
@@ -3772,12 +3774,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
 
        err = tegra_sor_parse_dt(sor);
        if (err < 0)
-               return err;
+               goto put_aux;
 
        err = tegra_output_probe(&sor->output);
-       if (err < 0)
-               return dev_err_probe(&pdev->dev, err,
-                                    "failed to probe output\n");
+       if (err < 0) {
+               dev_err_probe(&pdev->dev, err, "failed to probe output\n");
+               goto put_aux;
+       }
 
        if (sor->ops && sor->ops->probe) {
                err = sor->ops->probe(sor);
@@ -3916,17 +3919,10 @@ static int tegra_sor_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, sor);
        pm_runtime_enable(&pdev->dev);
 
-       INIT_LIST_HEAD(&sor->client.list);
+       host1x_client_init(&sor->client);
        sor->client.ops = &sor_client_ops;
        sor->client.dev = &pdev->dev;
 
-       err = host1x_client_register(&sor->client);
-       if (err < 0) {
-               dev_err(&pdev->dev, "failed to register host1x client: %d\n",
-                       err);
-               goto rpm_disable;
-       }
-
        /*
         * On Tegra210 and earlier, provide our own implementation for the
         * pad output clock.
@@ -3938,13 +3934,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
                                      sor->index);
                if (!name) {
                        err = -ENOMEM;
-                       goto unregister;
+                       goto uninit;
                }
 
                err = host1x_client_resume(&sor->client);
                if (err < 0) {
                        dev_err(sor->dev, "failed to resume: %d\n", err);
-                       goto unregister;
+                       goto uninit;
                }
 
                sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
@@ -3955,17 +3951,30 @@ static int tegra_sor_probe(struct platform_device *pdev)
                err = PTR_ERR(sor->clk_pad);
                dev_err(sor->dev, "failed to register SOR pad clock: %d\n",
                        err);
-               goto unregister;
+               goto uninit;
+       }
+
+       err = __host1x_client_register(&sor->client);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+                       err);
+               goto uninit;
        }
 
        return 0;
 
-unregister:
-       host1x_client_unregister(&sor->client);
-rpm_disable:
+uninit:
+       host1x_client_exit(&sor->client);
        pm_runtime_disable(&pdev->dev);
 remove:
+       if (sor->aux)
+               sor->output.ddc = NULL;
+
        tegra_output_remove(&sor->output);
+put_aux:
+       if (sor->aux)
+               put_device(sor->aux->dev);
+
        return err;
 }
 
@@ -3983,6 +3992,11 @@ static int tegra_sor_remove(struct platform_device *pdev)
 
        pm_runtime_disable(&pdev->dev);
 
+       if (sor->aux) {
+               put_device(sor->aux->dev);
+               sor->output.ddc = NULL;
+       }
+
        tegra_output_remove(&sor->output);
 
        return 0;
index cfd0b92..ebcffe7 100644 (file)
@@ -1172,7 +1172,10 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
        if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, NULL))
                return -EBUSY;
 
-       if (!ttm_bo_get_unless_zero(bo)) {
+       if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
+           bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
+           bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED ||
+           !ttm_bo_get_unless_zero(bo)) {
                if (locked)
                        dma_resv_unlock(bo->base.resv);
                return -EBUSY;
index 510e3e0..3d9c62b 100644 (file)
@@ -143,14 +143,8 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
 
                for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
                        list_for_each_entry(bo, &man->lru[j], lru) {
-                               uint32_t num_pages;
+                               uint32_t num_pages = PFN_UP(bo->base.size);
 
-                               if (!bo->ttm ||
-                                   bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
-                                   bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)
-                                       continue;
-
-                               num_pages = bo->ttm->num_pages;
                                ret = ttm_bo_swapout(bo, ctx, gfp_flags);
                                /* ttm_bo_swapout has dropped the lru_lock */
                                if (!ret)
index bb5529a..948b3a5 100644 (file)
@@ -372,7 +372,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
                if (!old_hvs_state->fifo_state[channel].in_use)
                        continue;
 
-               ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[i].pending_commit);
+               ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit);
                if (ret)
                        drm_err(dev, "Timed out waiting for commit\n");
        }
index 46f69c5..218e371 100644 (file)
@@ -736,6 +736,29 @@ void host1x_driver_unregister(struct host1x_driver *driver)
 EXPORT_SYMBOL(host1x_driver_unregister);
 
 /**
+ * __host1x_client_init() - initialize a host1x client
+ * @client: host1x client
+ * @key: lock class key for the client-specific mutex
+ */
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key)
+{
+       INIT_LIST_HEAD(&client->list);
+       __mutex_init(&client->lock, "host1x client lock", key);
+       client->usecount = 0;
+}
+EXPORT_SYMBOL(__host1x_client_init);
+
+/**
+ * host1x_client_exit() - uninitialize a host1x client
+ * @client: host1x client
+ */
+void host1x_client_exit(struct host1x_client *client)
+{
+       mutex_destroy(&client->lock);
+}
+EXPORT_SYMBOL(host1x_client_exit);
+
+/**
  * __host1x_client_register() - register a host1x client
  * @client: host1x client
  * @key: lock class key for the client-specific mutex
@@ -747,16 +770,11 @@ EXPORT_SYMBOL(host1x_driver_unregister);
  * device and call host1x_device_init(), which will in turn call each client's
  * &host1x_client_ops.init implementation.
  */
-int __host1x_client_register(struct host1x_client *client,
-                            struct lock_class_key *key)
+int __host1x_client_register(struct host1x_client *client)
 {
        struct host1x *host1x;
        int err;
 
-       INIT_LIST_HEAD(&client->list);
-       __mutex_init(&client->lock, "host1x client lock", key);
-       client->usecount = 0;
-
        mutex_lock(&devices_lock);
 
        list_for_each_entry(host1x, &devices, list) {
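Splitting initialization out of __host1x_client_register() lets a driver set up its host1x_client early, keep registration as the very last probe step (so the core never sees a half-constructed client), and unwind symmetrically with host1x_client_exit(). A probe-ordering sketch mirroring the tegra_sor_probe() changes earlier in this diff:

        host1x_client_init(&sor->client);
        sor->client.ops = &sor_client_ops;
        sor->client.dev = &pdev->dev;

        /* ... clocks, resume, pad clock registration ... */

        err = __host1x_client_register(&sor->client);
        if (err < 0)
                goto uninit;

        return 0;

uninit:
        host1x_client_exit(&sor->client);
        return err;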
index 4bf263c..1605549 100644 (file)
@@ -93,11 +93,11 @@ menu "Special HID drivers"
        depends on HID
 
 config HID_A4TECH
-       tristate "A4 tech mice"
+       tristate "A4TECH mice"
        depends on HID
        default !EXPERT
        help
-       Support for A4 tech X5 and WOP-35 / Trust 450L mice.
+       Support for some A4TECH mice with two scroll wheels.
 
 config HID_ACCUTOUCH
        tristate "Accutouch touch device"
@@ -922,6 +922,21 @@ config HID_SAMSUNG
        help
        Support for Samsung InfraRed remote control or keyboards.
 
+config HID_SEMITEK
+       tristate "Semitek USB keyboards"
+       depends on HID
+       help
+       Support for Semitek USB keyboards that are not fully compliant
+       with the HID standard.
+
+       There are many variants, including:
+       - GK61, GK64, GK68, GK84, GK96, etc.
+       - SK61, SK64, SK68, SK84, SK96, etc.
+       - Dierya DK61/DK66
+       - Tronsmart TK09R
+       - Woo-dy
+       - X-Bows Nature/Knight
+
 config HID_SONY
        tristate "Sony PS2/3/4 accessories"
        depends on USB_HID
index 193431e..1ea1a7c 100644 (file)
@@ -106,6 +106,7 @@ obj-$(CONFIG_HID_ROCCAT)    += hid-roccat.o hid-roccat-common.o \
 obj-$(CONFIG_HID_RMI)          += hid-rmi.o
 obj-$(CONFIG_HID_SAITEK)       += hid-saitek.o
 obj-$(CONFIG_HID_SAMSUNG)      += hid-samsung.o
+obj-$(CONFIG_HID_SEMITEK)      += hid-semitek.o
 obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
 obj-$(CONFIG_HID_SONY)         += hid-sony.o
 obj-$(CONFIG_HID_SPEEDLINK)    += hid-speedlink.o
index 2ab38b7..3589d99 100644 (file)
@@ -88,6 +88,7 @@ static void amd_sfh_work(struct work_struct *work)
        sensor_index = req_node->sensor_idx;
        report_id = req_node->report_id;
        node_type = req_node->report_type;
+       kfree(req_node);
 
        if (node_type == HID_FEATURE_REPORT) {
                report_size = get_feature_report(sensor_index, report_id,
@@ -142,7 +143,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
        int rc, i;
 
        dev = &privdata->pdev->dev;
-       cl_data = kzalloc(sizeof(*cl_data), GFP_KERNEL);
+       cl_data = devm_kzalloc(dev, sizeof(*cl_data), GFP_KERNEL);
        if (!cl_data)
                return -ENOMEM;
 
@@ -175,12 +176,12 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
                        rc = -EINVAL;
                        goto cleanup;
                }
-               cl_data->feature_report[i] = kzalloc(feature_report_size, GFP_KERNEL);
+               cl_data->feature_report[i] = devm_kzalloc(dev, feature_report_size, GFP_KERNEL);
                if (!cl_data->feature_report[i]) {
                        rc = -ENOMEM;
                        goto cleanup;
                }
-               cl_data->input_report[i] = kzalloc(input_report_size, GFP_KERNEL);
+               cl_data->input_report[i] = devm_kzalloc(dev, input_report_size, GFP_KERNEL);
                if (!cl_data->input_report[i]) {
                        rc = -ENOMEM;
                        goto cleanup;
@@ -189,7 +190,8 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
                info.sensor_idx = cl_idx;
                info.dma_address = cl_data->sensor_dma_addr[i];
 
-               cl_data->report_descr[i] = kzalloc(cl_data->report_descr_sz[i], GFP_KERNEL);
+               cl_data->report_descr[i] =
+                       devm_kzalloc(dev, cl_data->report_descr_sz[i], GFP_KERNEL);
                if (!cl_data->report_descr[i]) {
                        rc = -ENOMEM;
                        goto cleanup;
@@ -214,11 +216,11 @@ cleanup:
                                          cl_data->sensor_virt_addr[i],
                                          cl_data->sensor_dma_addr[i]);
                }
-               kfree(cl_data->feature_report[i]);
-               kfree(cl_data->input_report[i]);
-               kfree(cl_data->report_descr[i]);
+               devm_kfree(dev, cl_data->feature_report[i]);
+               devm_kfree(dev, cl_data->input_report[i]);
+               devm_kfree(dev, cl_data->report_descr[i]);
        }
-       kfree(cl_data);
+       devm_kfree(dev, cl_data);
        return rc;
 }
 
@@ -241,6 +243,5 @@ int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
                                          cl_data->sensor_dma_addr[i]);
                }
        }
-       kfree(cl_data);
        return 0;
 }
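The conversion to devm_kzalloc() ties each buffer's lifetime to the parent device, which is why the deinit path above and amdtp_hid_remove() in the next hunk can drop their kfree() calls; the devm_kfree() calls kept in the cleanup label only release memory early when probe itself fails. The ownership model in miniature (sketch):

        buf = devm_kzalloc(dev, size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        /*
         * Freed automatically when dev unbinds; devm_kfree(dev, buf) is
         * only needed for early release on probe error paths.
         */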
index 4f98948..5ad1e7a 100644 (file)
@@ -162,9 +162,6 @@ void amdtp_hid_remove(struct amdtp_cl_data *cli_data)
        int i;
 
        for (i = 0; i < cli_data->num_hid_devices; ++i) {
-               kfree(cli_data->feature_report[i]);
-               kfree(cli_data->input_report[i]);
-               kfree(cli_data->report_descr[i]);
                if (cli_data->hid_sensor_hubs[i]) {
                        kfree(cli_data->hid_sensor_hubs[i]->driver_data);
                        hid_destroy_device(cli_data->hid_sensor_hubs[i]);
index 3a8c4a5..2cbc32d 100644 (file)
@@ -147,6 +147,8 @@ static const struct hid_device_id a4_devices[] = {
                .driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
        { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649),
                .driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
+       { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_NB_95),
+               .driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
        { }
 };
 MODULE_DEVICE_TABLE(hid, a4_devices);
index 2ab22b9..fca8fc7 100644 (file)
@@ -79,10 +79,9 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
 #define QUIRK_T100_KEYBOARD            BIT(6)
 #define QUIRK_T100CHI                  BIT(7)
 #define QUIRK_G752_KEYBOARD            BIT(8)
-#define QUIRK_T101HA_DOCK              BIT(9)
-#define QUIRK_T90CHI                   BIT(10)
-#define QUIRK_MEDION_E1239T            BIT(11)
-#define QUIRK_ROG_NKEY_KEYBOARD                BIT(12)
+#define QUIRK_T90CHI                   BIT(9)
+#define QUIRK_MEDION_E1239T            BIT(10)
+#define QUIRK_ROG_NKEY_KEYBOARD                BIT(11)
 
 #define I2C_KEYBOARD_QUIRKS                    (QUIRK_FIX_NOTEBOOK_REPORT | \
                                                 QUIRK_NO_INIT_REPORTS | \
@@ -335,7 +334,7 @@ static int asus_raw_event(struct hid_device *hdev,
        if (drvdata->quirks & QUIRK_MEDION_E1239T)
                return asus_e1239t_event(drvdata, data, size);
 
-       if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+       if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT) {
                /*
                 * Skip these report ID, the device emits a continuous stream associated
                 * with the AURA mode it is in which looks like an 'echo'.
@@ -355,6 +354,16 @@ static int asus_raw_event(struct hid_device *hdev,
                                return -1;
                        }
                }
+               if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+                       /*
+                        * G713 and G733 send these codes on some keypresses; depending
+                        * on the key pressed, they can trigger a shutdown event if not
+                        * caught.
+                        */
+                       if (data[0] == 0x02 && data[1] == 0x30) {
+                               return -1;
+                       }
+               }
+
        }
 
        return 0;
@@ -1072,11 +1081,6 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
                return ret;
        }
 
-       /* use hid-multitouch for T101HA touchpad */
-       if (id->driver_data & QUIRK_T101HA_DOCK &&
-           hdev->collection->usage == HID_GD_MOUSE)
-               return -ENODEV;
-
        ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
        if (ret) {
                hid_err(hdev, "Asus hw start failed: %d\n", ret);
@@ -1230,8 +1234,6 @@ static const struct hid_device_id asus_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
                USB_DEVICE_ID_ASUSTEK_T100TAF_KEYBOARD),
          QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
-       { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
-               USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD), QUIRK_T101HA_DOCK },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
        { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
        { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
@@ -1239,6 +1241,12 @@ static const struct hid_device_id asus_devices[] = {
                USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD), QUIRK_T100CHI },
        { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE_MEDION_E1239T),
                QUIRK_MEDION_E1239T },
+       /*
+        * Note: we bind to the HID_GROUP_GENERIC group so that this driver
+        * only handles the keyboard part, while hid-multitouch.c handles
+        * the touchpad.
+        */
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+               USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, asus_devices);
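Rather than probing the whole T101HA and bailing out with -ENODEV on the touchpad interface, the new match entry uses HID_DEVICE() with HID_GROUP_GENERIC so hid-asus only ever binds the keyboard part; the touchpad is claimed by the matching hid-multitouch entry added later in this diff. The two sides of the match (entries copied from the hunks in this series):

        /* hid-asus.c: keyboard part only */
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) },

        /* hid-multitouch.c: touchpad part */
        { .driver_data = MT_CLS_WIN_8_DISABLE_WAKEUP,
                HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
                           USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) },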
index 0ae9f6d..0de2788 100644 (file)
@@ -2005,6 +2005,9 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
        case BUS_I2C:
                bus = "I2C";
                break;
+       case BUS_VIRTUAL:
+               bus = "VIRTUAL";
+               break;
        default:
                bus = "<UNKNOWN>";
        }
@@ -2588,7 +2591,6 @@ int hid_check_keys_pressed(struct hid_device *hid)
 
        return 0;
 }
-
 EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
 
 static int __init hid_init(void)
index 59f8d71..a311fb8 100644 (file)
@@ -930,6 +930,9 @@ static const char *keys[KEY_MAX + 1] = {
        [KEY_APPSELECT] = "AppSelect",
        [KEY_SCREENSAVER] = "ScreenSaver",
        [KEY_VOICECOMMAND] = "VoiceCommand",
+       [KEY_ASSISTANT] = "Assistant",
+       [KEY_KBD_LAYOUT_NEXT] = "KbdLayoutNext",
+       [KEY_EMOJI_PICKER] = "EmojiPicker",
        [KEY_BRIGHTNESS_MIN] = "BrightnessMin",
        [KEY_BRIGHTNESS_MAX] = "BrightnessMax",
        [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
index a575160..f43a840 100644 (file)
@@ -201,7 +201,7 @@ struct ft260_i2c_write_request_report {
        u8 address;             /* 7-bit I2C address */
        u8 flag;                /* I2C transaction condition */
        u8 length;              /* data payload length */
-       u8 data[60];            /* data payload */
+       u8 data[FT260_WR_DATA_MAX]; /* data payload */
 } __packed;
 
 struct ft260_i2c_read_request_report {
@@ -249,7 +249,10 @@ static int ft260_hid_feature_report_get(struct hid_device *hdev,
 
        ret = hid_hw_raw_request(hdev, report_id, buf, len, HID_FEATURE_REPORT,
                                 HID_REQ_GET_REPORT);
-       memcpy(data, buf, len);
+       if (likely(ret == len))
+               memcpy(data, buf, len);
+       else if (ret >= 0)
+               ret = -EIO;
        kfree(buf);
        return ret;
 }
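With the length check folded into ft260_hid_feature_report_get(), a short transfer now surfaces as -EIO from the helper itself, so each caller collapses to a single sign check, as the hunks below show. The resulting caller pattern:

        ret = ft260_hid_feature_report_get(hdev, FT260_I2C_STATUS,
                                           (u8 *)&report, sizeof(report));
        if (ret < 0)
                return ret;     /* includes -EIO for short reads */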
@@ -298,7 +301,7 @@ static int ft260_xfer_status(struct ft260_device *dev)
 
        ret = ft260_hid_feature_report_get(hdev, FT260_I2C_STATUS,
                                           (u8 *)&report, sizeof(report));
-       if (ret < 0) {
+       if (unlikely(ret < 0)) {
                hid_err(hdev, "failed to retrieve status: %d\n", ret);
                return ret;
        }
@@ -429,6 +432,9 @@ static int ft260_smbus_write(struct ft260_device *dev, u8 addr, u8 cmd,
        struct ft260_i2c_write_request_report *rep =
                (struct ft260_i2c_write_request_report *)dev->write_buf;
 
+       if (data_len >= sizeof(rep->data))
+               return -EINVAL;
+
        rep->address = addr;
        rep->data[0] = cmd;
        rep->length = data_len + 1;
@@ -721,10 +727,9 @@ static int ft260_get_system_config(struct hid_device *hdev,
 
        ret = ft260_hid_feature_report_get(hdev, FT260_SYSTEM_SETTINGS,
                                           (u8 *)cfg, len);
-       if (ret != len) {
+       if (ret < 0) {
                hid_err(hdev, "failed to retrieve system status\n");
-               if (ret >= 0)
-                       return -EIO;
+               return ret;
        }
        return 0;
 }
@@ -777,8 +782,8 @@ static int ft260_byte_show(struct hid_device *hdev, int id, u8 *cfg, int len,
        int ret;
 
        ret = ft260_hid_feature_report_get(hdev, id, cfg, len);
-       if (ret != len && ret >= 0)
-               return -EIO;
+       if (ret < 0)
+               return ret;
 
        return scnprintf(buf, PAGE_SIZE, "%hi\n", *field);
 }
@@ -789,8 +794,8 @@ static int ft260_word_show(struct hid_device *hdev, int id, u8 *cfg, int len,
        int ret;
 
        ret = ft260_hid_feature_report_get(hdev, id, cfg, len);
-       if (ret != len && ret >= 0)
-               return -EIO;
+       if (ret < 0)
+               return ret;
 
        return scnprintf(buf, PAGE_SIZE, "%hi\n", le16_to_cpu(*field));
 }
@@ -941,10 +946,8 @@ static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id)
 
        ret = ft260_hid_feature_report_get(hdev, FT260_CHIP_VERSION,
                                           (u8 *)&version, sizeof(version));
-       if (ret != sizeof(version)) {
+       if (ret < 0) {
                hid_err(hdev, "failed to retrieve chip version\n");
-               if (ret >= 0)
-                       ret = -EIO;
                goto err_hid_close;
        }
 
index 898871c..29ccb0a 100644 (file)
@@ -54,6 +54,7 @@ static const struct hid_device_id gt683r_led_id[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
        { }
 };
+MODULE_DEVICE_TABLE(hid, gt683r_led_id);
 
 static void gt683r_brightness_set(struct led_classdev *led_cdev,
                                enum led_brightness brightness)
index 84b8da3..b84a0a1 100644 (file)
@@ -26,6 +26,7 @@
 #define USB_DEVICE_ID_A4TECH_WCP32PU   0x0006
 #define USB_DEVICE_ID_A4TECH_X5_005D   0x000a
 #define USB_DEVICE_ID_A4TECH_RP_649    0x001a
+#define USB_DEVICE_ID_A4TECH_NB_95     0x022b
 
 #define USB_VENDOR_ID_AASHIMA          0x06d6
 #define USB_DEVICE_ID_AASHIMA_GAMEPAD  0x0025
 
 #define USB_VENDOR_ID_CORSAIR          0x1b1c
 #define USB_DEVICE_ID_CORSAIR_K90      0x1b02
-
-#define USB_VENDOR_ID_CORSAIR           0x1b1c
 #define USB_DEVICE_ID_CORSAIR_K70R      0x1b09
 #define USB_DEVICE_ID_CORSAIR_K95RGB    0x1b11
 #define USB_DEVICE_ID_CORSAIR_M65RGB    0x1b12
 #define USB_DEVICE_ID_LENOVO_X1_COVER  0x6085
 #define USB_DEVICE_ID_LENOVO_X1_TAB    0x60a3
 #define USB_DEVICE_ID_LENOVO_X1_TAB3   0x60b5
+#define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E    0x600e
 #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D     0x608d
 #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019     0x6019
 #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E     0x602e
 #define USB_DEVICE_ID_SAITEK_X52       0x075c
 #define USB_DEVICE_ID_SAITEK_X52_2     0x0255
 #define USB_DEVICE_ID_SAITEK_X52_PRO   0x0762
+#define USB_DEVICE_ID_SAITEK_X65       0x0b6a
 
 #define USB_VENDOR_ID_SAMSUNG          0x0419
 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE        0x0001
 #define USB_DEVICE_ID_SEMICO_USB_KEYKOARD      0x0023
 #define USB_DEVICE_ID_SEMICO_USB_KEYKOARD2     0x0027
 
+#define USB_VENDOR_ID_SEMITEK  0x1ea7
+#define USB_DEVICE_ID_SEMITEK_KEYBOARD 0x0907
+
 #define USB_VENDOR_ID_SENNHEISER       0x1395
 #define USB_DEVICE_ID_SENNHEISER_BTD500USB     0x002c
 
 #define USB_DEVICE_ID_SYNAPTICS_DELL_K12A      0x2819
 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012       0x2968
 #define USB_DEVICE_ID_SYNAPTICS_TP_V103        0x5710
+#define USB_DEVICE_ID_SYNAPTICS_DELL_K15A      0x6e21
 #define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002 0x73f4
 #define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5
 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5   0x81a7
index 18f5e28..abbfa91 100644 (file)
@@ -964,6 +964,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
 
                case 0x0cd: map_key_clear(KEY_PLAYPAUSE);       break;
                case 0x0cf: map_key_clear(KEY_VOICECOMMAND);    break;
+
+               case 0x0d9: map_key_clear(KEY_EMOJI_PICKER);    break;
+
                case 0x0e0: map_abs_clear(ABS_VOLUME);          break;
                case 0x0e2: map_key_clear(KEY_MUTE);            break;
                case 0x0e5: map_key_clear(KEY_BASSBOOST);       break;
index d598094..fee4e54 100644 (file)
@@ -1263,6 +1263,7 @@ static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage,
        int status;
 
        long flags = (long) data[2];
+       *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
 
        if (flags & 0x80)
                switch (flags & 0x07) {
index 2bb473d..8bcaee4 100644 (file)
@@ -693,7 +693,7 @@ static int magicmouse_probe(struct hid_device *hdev,
        if (id->vendor == USB_VENDOR_ID_APPLE &&
            id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
            hdev->type != HID_TYPE_USBMOUSE)
-               return 0;
+               return -ENODEV;
 
        msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
        if (msc == NULL) {
@@ -779,7 +779,10 @@ err_stop_hw:
 static void magicmouse_remove(struct hid_device *hdev)
 {
        struct magicmouse_sc *msc = hid_get_drvdata(hdev);
-       cancel_delayed_work_sync(&msc->work);
+
+       if (msc)
+               cancel_delayed_work_sync(&msc->work);
+
        hid_hw_stop(hdev);
 }
 
index 9d9f3e1..2e4fb76 100644 (file)
@@ -70,6 +70,7 @@ MODULE_LICENSE("GPL");
 #define MT_QUIRK_WIN8_PTP_BUTTONS      BIT(18)
 #define MT_QUIRK_SEPARATE_APP_REPORT   BIT(19)
 #define MT_QUIRK_FORCE_MULTI_INPUT     BIT(20)
+#define MT_QUIRK_DISABLE_WAKEUP                BIT(21)
 
 #define MT_INPUTMODE_TOUCHSCREEN       0x02
 #define MT_INPUTMODE_TOUCHPAD          0x03
@@ -191,6 +192,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
 #define MT_CLS_EXPORT_ALL_INPUTS               0x0013
 /* reserved                                    0x0014 */
 #define MT_CLS_WIN_8_FORCE_MULTI_INPUT         0x0015
+#define MT_CLS_WIN_8_DISABLE_WAKEUP            0x0016
 
 /* vendor specific classes */
 #define MT_CLS_3M                              0x0101
@@ -283,6 +285,15 @@ static const struct mt_class mt_classes[] = {
                        MT_QUIRK_WIN8_PTP_BUTTONS |
                        MT_QUIRK_FORCE_MULTI_INPUT,
                .export_all_inputs = true },
+       { .name = MT_CLS_WIN_8_DISABLE_WAKEUP,
+               .quirks = MT_QUIRK_ALWAYS_VALID |
+                       MT_QUIRK_IGNORE_DUPLICATES |
+                       MT_QUIRK_HOVERING |
+                       MT_QUIRK_CONTACT_CNT_ACCURATE |
+                       MT_QUIRK_STICKY_FINGERS |
+                       MT_QUIRK_WIN8_PTP_BUTTONS |
+                       MT_QUIRK_DISABLE_WAKEUP,
+               .export_all_inputs = true },
 
        /*
         * vendor specific classes
@@ -604,9 +615,13 @@ static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
                if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
                        continue;
 
-               for (n = 0; n < field->report_count; n++) {
-                       if (field->usage[n].hid == HID_DG_CONTACTID)
-                               rdata->is_mt_collection = true;
+               if (field->logical == HID_DG_FINGER || td->hdev->group != HID_GROUP_MULTITOUCH_WIN_8) {
+                       for (n = 0; n < field->report_count; n++) {
+                               if (field->usage[n].hid == HID_DG_CONTACTID) {
+                                       rdata->is_mt_collection = true;
+                                       break;
+                               }
+                       }
                }
        }
 
@@ -759,7 +774,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                        return 1;
                case HID_DG_CONFIDENCE:
                        if ((cls->name == MT_CLS_WIN_8 ||
-                            cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) &&
+                            cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT ||
+                            cls->name == MT_CLS_WIN_8_DISABLE_WAKEUP) &&
                                (field->application == HID_DG_TOUCHPAD ||
                                 field->application == HID_DG_TOUCHSCREEN))
                                app->quirks |= MT_QUIRK_CONFIDENCE;
@@ -1576,13 +1592,13 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
                /* we do not set suffix = "Touchscreen" */
                hi->input->name = hdev->name;
                break;
-       case HID_DG_STYLUS:
-               /* force BTN_STYLUS to allow tablet matching in udev */
-               __set_bit(BTN_STYLUS, hi->input->keybit);
-               break;
        case HID_VD_ASUS_CUSTOM_MEDIA_KEYS:
                suffix = "Custom Media Keys";
                break;
+       case HID_DG_STYLUS:
+               /* force BTN_STYLUS to allow tablet matching in udev */
+               __set_bit(BTN_STYLUS, hi->input->keybit);
+               fallthrough;
        case HID_DG_PEN:
                suffix = "Stylus";
                break;
@@ -1749,8 +1765,14 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
 #ifdef CONFIG_PM
 static int mt_suspend(struct hid_device *hdev, pm_message_t state)
 {
+       struct mt_device *td = hid_get_drvdata(hdev);
+
        /* High latency is desirable for power savings during S3/S0ix */
-       mt_set_modes(hdev, HID_LATENCY_HIGH, true, true);
+       if (td->mtclass.quirks & MT_QUIRK_DISABLE_WAKEUP)
+               mt_set_modes(hdev, HID_LATENCY_HIGH, false, false);
+       else
+               mt_set_modes(hdev, HID_LATENCY_HIGH, true, true);
+
        return 0;
 }
 
@@ -1809,6 +1831,12 @@ static const struct hid_device_id mt_devices[] = {
                MT_USB_DEVICE(USB_VENDOR_ID_ANTON,
                        USB_DEVICE_ID_ANTON_TOUCH_PAD) },
 
+       /* Asus T101HA */
+       { .driver_data = MT_CLS_WIN_8_DISABLE_WAKEUP,
+               HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+                          USB_VENDOR_ID_ASUSTEK,
+                          USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) },
+
        /* Asus T304UA */
        { .driver_data = MT_CLS_ASUS,
                HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
index 3dd6f15..51b39bd 100644 (file)
@@ -110,6 +110,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E), HID_QUIRK_ALWAYS_POLL },
@@ -158,6 +159,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_PRO), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X65), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
        { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
@@ -176,6 +178,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K12A), HID_QUIRK_NO_INIT_REPORTS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K15A), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD },
        { HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
@@ -211,6 +214,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
        { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
        { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_NB_95) },
 #endif
 #if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
diff --git a/drivers/hid/hid-semitek.c b/drivers/hid/hid-semitek.c
new file mode 100644 (file)
index 0000000..ba6607d
--- /dev/null
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *  HID driver for Semitek keyboards
+ *
+ *  Copyright (c) 2021 Benjamin Moody
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+static __u8 *semitek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+                                  unsigned int *rsize)
+{
+       /*
+        * In the report descriptor for interface 2, fix the incorrect
+        * description of report ID 0x04 (the report contains a bitmask,
+        * not an array of keycodes).
+        */
+       if (*rsize == 0xcb && rdesc[0x83] == 0x81 && rdesc[0x84] == 0x00) {
+               hid_info(hdev, "fixing up Semitek report descriptor\n");
+               rdesc[0x84] = 0x02;
+       }
+       return rdesc;
+}
+
+static const struct hid_device_id semitek_devices[] = {
+       { HID_USB_DEVICE(USB_VENDOR_ID_SEMITEK, USB_DEVICE_ID_SEMITEK_KEYBOARD) },
+       { }
+};
+MODULE_DEVICE_TABLE(hid, semitek_devices);
+
+static struct hid_driver semitek_driver = {
+       .name = "semitek",
+       .id_table = semitek_devices,
+       .report_fixup = semitek_report_fixup,
+};
+module_hid_driver(semitek_driver);
+
+MODULE_LICENSE("GPL");
index 2e66621..32c2306 100644 (file)
@@ -387,7 +387,7 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr,
        struct hid_sensor_custom *sensor_inst = dev_get_drvdata(dev);
        int index, field_index, usage;
        char name[HID_CUSTOM_NAME_LENGTH];
-       int value;
+       int value, ret;
 
        if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage,
                   name) == 3) {
@@ -403,8 +403,10 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr,
 
                report_id = sensor_inst->fields[field_index].attribute.
                                                                report_id;
-               sensor_hub_set_feature(sensor_inst->hsdev, report_id,
-                                      index, sizeof(value), &value);
+               ret = sensor_hub_set_feature(sensor_inst->hsdev, report_id,
+                                            index, sizeof(value), &value);
+               if (ret)
+                       return ret;
        } else
                return -EINVAL;
 
index 95cf88f..6abd3e2 100644 (file)
@@ -209,16 +209,21 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
        buffer_size = buffer_size / sizeof(__s32);
        if (buffer_size) {
                for (i = 0; i < buffer_size; ++i) {
-                       hid_set_field(report->field[field_index], i,
-                                     (__force __s32)cpu_to_le32(*buf32));
+                       ret = hid_set_field(report->field[field_index], i,
+                                           (__force __s32)cpu_to_le32(*buf32));
+                       if (ret)
+                               goto done_proc;
+
                        ++buf32;
                }
        }
        if (remaining_bytes) {
                value = 0;
                memcpy(&value, (u8 *)buf32, remaining_bytes);
-               hid_set_field(report->field[field_index], i,
-                             (__force __s32)cpu_to_le32(value));
+               ret = hid_set_field(report->field[field_index], i,
+                                   (__force __s32)cpu_to_le32(value));
+               if (ret)
+                       goto done_proc;
        }
        hid_hw_request(hsdev->hdev, report, HID_REQ_SET_REPORT);
        hid_hw_wait(hsdev->hdev);
index 2e452c6..f643b1c 100644 (file)
@@ -312,7 +312,7 @@ static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_i
        }
 
        tm_wheel->change_request = kzalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
-       if (!tm_wheel->model_request) {
+       if (!tm_wheel->change_request) {
                ret = -ENOMEM;
                goto error5;
        }
index 9993133..4647461 100644 (file)
@@ -45,6 +45,7 @@
 #define I2C_HID_QUIRK_BOGUS_IRQ                        BIT(4)
 #define I2C_HID_QUIRK_RESET_ON_RESUME          BIT(5)
 #define I2C_HID_QUIRK_BAD_INPUT_SIZE           BIT(6)
+#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET    BIT(7)
 
 
 /* flags */
@@ -178,6 +179,11 @@ static const struct i2c_hid_quirks {
                 I2C_HID_QUIRK_RESET_ON_RESUME },
        { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
                I2C_HID_QUIRK_BAD_INPUT_SIZE },
+       /*
+        * Sending the wakeup after reset actually breaks the ELAN touchscreen controller
+        */
+       { USB_VENDOR_ID_ELAN, HID_ANY_ID,
+                I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET },
        { 0, 0 }
 };
 
@@ -461,7 +467,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
        }
 
        /* At least some SIS devices need this after reset */
-       ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+       if (!(ihid->quirks & I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET))
+               ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
 
 out_unlock:
        mutex_unlock(&ihid->reset_lock);
@@ -990,8 +997,8 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
        hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
        hid->product = le16_to_cpu(ihid->hdesc.wProductID);
 
-       snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
-                client->name, hid->vendor, hid->product);
+       snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
+                client->name, (u16)hid->vendor, (u16)hid->product);
        strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
 
        ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
index 21b87e4..07e3cbc 100644 (file)
@@ -28,6 +28,8 @@
 #define EHL_Ax_DEVICE_ID       0x4BB3
 #define TGL_LP_DEVICE_ID       0xA0FC
 #define TGL_H_DEVICE_ID                0x43FC
+#define ADL_S_DEVICE_ID                0x7AF8
+#define ADL_P_DEVICE_ID                0x51FC
 
 #define        REVISION_ID_CHT_A0      0x6
 #define        REVISION_ID_CHT_Ax_SI   0x0
index 06081cf..a6d5173 100644 (file)
@@ -39,6 +39,8 @@ static const struct pci_device_id ish_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_H_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_S_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_P_DEVICE_ID)},
        {0, }
 };
 MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
index 7b27ec3..5571e74 100644 (file)
@@ -168,9 +168,9 @@ int surface_hid_device_add(struct surface_hid_device *shid)
 
        shid->hid->dev.parent = shid->dev;
        shid->hid->bus = BUS_HOST;
-       shid->hid->vendor = cpu_to_le16(shid->attrs.vendor);
-       shid->hid->product = cpu_to_le16(shid->attrs.product);
-       shid->hid->version = cpu_to_le16(shid->hid_desc.hid_version);
+       shid->hid->vendor = get_unaligned_le16(&shid->attrs.vendor);
+       shid->hid->product = get_unaligned_le16(&shid->attrs.product);
+       shid->hid->version = get_unaligned_le16(&shid->hid_desc.hid_version);
        shid->hid->country = shid->hid_desc.country_code;
 
        snprintf(shid->hid->name, sizeof(shid->hid->name), "Microsoft Surface %04X:%04X",
index 86257ce..4e90773 100644 (file)
@@ -374,7 +374,7 @@ static int hid_submit_ctrl(struct hid_device *hid)
        raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report;
        dir = usbhid->ctrl[usbhid->ctrltail].dir;
 
-       len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
+       len = hid_report_len(report);
        if (dir == USB_DIR_OUT) {
                usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
                usbhid->urbctrl->transfer_buffer_length = len;
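
The helper call above replaces an open-coded copy of the same formula: round the report's bit size up to whole bytes, plus one byte when a numbered report ID is prepended. A quick worked check (ours):

        /* report->size = 12 bits, report->id = 4:
         *   ((12 - 1) >> 3) + 1 + (4 > 0) = 1 + 1 + 1 = 3 bytes,
         * which is what hid_report_len() returns as well.
         */
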
index ea126c5..3b4ee21 100644 (file)
@@ -1292,6 +1292,7 @@ int hid_pidff_init(struct hid_device *hid)
 
        if (pidff->pool[PID_DEVICE_MANAGED_POOL].value &&
            pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) {
+               error = -EPERM;
                hid_notice(hid,
                           "device does not support device managed pool\n");
                goto fail;
index 02298b8..731d511 100644 (file)
@@ -771,6 +771,16 @@ static int corsairpsu_raw_event(struct hid_device *hdev, struct hid_report *repo
        return 0;
 }
 
+#ifdef CONFIG_PM
+static int corsairpsu_resume(struct hid_device *hdev)
+{
+       struct corsairpsu_data *priv = hid_get_drvdata(hdev);
+
+       /* some PSUs turn off the microcontroller during standby, so a reinit is required */
+       return corsairpsu_init(priv);
+}
+#endif
+
 static const struct hid_device_id corsairpsu_idtable[] = {
        { HID_USB_DEVICE(0x1b1c, 0x1c03) }, /* Corsair HX550i */
        { HID_USB_DEVICE(0x1b1c, 0x1c04) }, /* Corsair HX650i */
@@ -793,6 +803,10 @@ static struct hid_driver corsairpsu_driver = {
        .probe          = corsairpsu_probe,
        .remove         = corsairpsu_remove,
        .raw_event      = corsairpsu_raw_event,
+#ifdef CONFIG_PM
+       .resume         = corsairpsu_resume,
+       .reset_resume   = corsairpsu_resume,
+#endif
 };
 module_hid_driver(corsairpsu_driver);
 
index 2970892..f2221ca 100644 (file)
@@ -838,10 +838,10 @@ static struct attribute *i8k_attrs[] = {
 static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
                              int index)
 {
-       if (disallow_fan_support && index >= 8)
+       if (disallow_fan_support && index >= 20)
                return 0;
        if (disallow_fan_type_call &&
-           (index == 9 || index == 12 || index == 15))
+           (index == 21 || index == 25 || index == 28))
                return 0;
        if (index >= 0 && index <= 1 &&
            !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
index e248424..aec294c 100644 (file)
@@ -37,6 +37,8 @@ struct fsp3y_data {
        struct pmbus_driver_info info;
        int chip;
        int page;
+
+       bool vout_linear_11;
 };
 
 #define to_fsp3y_data(x) container_of(x, struct fsp3y_data, info)
@@ -108,11 +110,9 @@ static int fsp3y_read_byte_data(struct i2c_client *client, int page, int reg)
        int rv;
 
        /*
-        * YH5151-E outputs vout in linear11. The conversion is done when
-        * reading. Here, we have to inject pmbus_core with the correct
-        * exponent (it is -6).
+        * Inject an exponent for non-compliant YH5151-E.
         */
-       if (data->chip == yh5151e && reg == PMBUS_VOUT_MODE)
+       if (data->vout_linear_11 && reg == PMBUS_VOUT_MODE)
                return 0x1A;
 
        rv = set_page(client, page);
@@ -161,10 +161,9 @@ static int fsp3y_read_word_data(struct i2c_client *client, int page, int phase,
                return rv;
 
        /*
-        * YH-5151E is non-compliant and outputs output voltages in linear11
-        * instead of linear16.
+        * Handle YH-5151E non-compliant linear11 vout voltage.
         */
-       if (data->chip == yh5151e && reg == PMBUS_READ_VOUT)
+       if (data->vout_linear_11 && reg == PMBUS_READ_VOUT)
                rv = sign_extend32(rv, 10) & 0xffff;
 
        return rv;
@@ -256,6 +255,25 @@ static int fsp3y_probe(struct i2c_client *client)
 
        data->info = fsp3y_info[data->chip];
 
+       /*
+        * YH-5151E sometimes reports vout in linear11 and sometimes in
+        * linear16. This depends on the exact individual piece of hardware. One
+        * YH-5151E can use linear16 and another might use linear11 instead.
+        *
+        * The format can be recognized by reading VOUT_MODE - if it doesn't
+        * report a valid exponent, then vout uses linear11. Otherwise, the
+        * device is compliant and uses linear16.
+        */
+       data->vout_linear_11 = false;
+       if (data->chip == yh5151e) {
+               rv = i2c_smbus_read_byte_data(client, PMBUS_VOUT_MODE);
+               if (rv < 0)
+                       return rv;
+
+               if (rv == 0xFF)
+                       data->vout_linear_11 = true;
+       }
+
        return pmbus_do_probe(client, &data->info);
 }
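
For reference, a minimal sketch (ours, not from the driver) of what the injected exponent means: the low five bits of VOUT_MODE form a two's-complement exponent, 0x1A decodes to -6, so a linear11-style mantissa scales by 2^-6:

        /* Hypothetical helper using sign_extend32() from <linux/bitops.h>;
         * bit 10 is the sign bit of the 11-bit mantissa.
         */
        static long yh5151e_vout_to_uv(u16 raw)
        {
                s32 mantissa = sign_extend32(raw, 10);

                return mantissa * 1000000L / 64; /* 2^-6 V -> microvolts */
        }
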
 
index 40597a9..1a8caff 100644 (file)
@@ -244,8 +244,8 @@ static int isl68137_probe(struct i2c_client *client)
                info->read_word_data = raa_dmpvr2_read_word_data;
                break;
        case raa_dmpvr2_2rail_nontc:
-               info->func[0] &= ~PMBUS_HAVE_TEMP;
-               info->func[1] &= ~PMBUS_HAVE_TEMP;
+               info->func[0] &= ~PMBUS_HAVE_TEMP3;
+               info->func[1] &= ~PMBUS_HAVE_TEMP3;
                fallthrough;
        case raa_dmpvr2_2rail:
                info->pages = 2;
index b6e8b20..fa298b4 100644 (file)
@@ -299,7 +299,7 @@ static int q54sj108a2_probe(struct i2c_client *client)
                dev_err(&client->dev, "Failed to read Manufacturer ID\n");
                return ret;
        }
-       if (ret != 5 || strncmp(buf, "DELTA", 5)) {
+       if (ret != 6 || strncmp(buf, "DELTA", 5)) {
                buf[ret] = '\0';
                dev_err(dev, "Unsupported Manufacturer ID '%s'\n", buf);
                return -ENODEV;
index 25aac40..9198779 100644 (file)
@@ -99,6 +99,15 @@ scpi_show_sensor(struct device *dev, struct device_attribute *attr, char *buf)
 
        scpi_scale_reading(&value, sensor);
 
+       /*
+        * Temperature sensor values are treated as signed based on
+        * observation: although signedness is not explicitly specified,
+        * an unsigned 64-bit temperature makes little practical sense,
+        * especially when the temperature is below zero degrees Celsius.
+        */
+       if (sensor->info.class == TEMPERATURE)
+               return sprintf(buf, "%lld\n", (s64)value);
+
        return sprintf(buf, "%llu\n", value);
 }
 
index c2484f1..8bd6435 100644 (file)
 #define POWER_ENABLE                   0x19
 #define TPS23861_NUM_PORTS             4
 
+#define TPS23861_GENERAL_MASK_1                0x17
+#define TPS23861_CURRENT_SHUNT_MASK    BIT(0)
+
 #define TEMPERATURE_LSB                        652 /* 0.652 degrees Celsius */
 #define VOLTAGE_LSB                    3662 /* 3.662 mV */
 #define SHUNT_RESISTOR_DEFAULT         255000 /* 255 mOhm */
-#define CURRENT_LSB_255                        62260 /* 62.260 uA */
-#define CURRENT_LSB_250                        61039 /* 61.039 uA */
+#define CURRENT_LSB_250                        62260 /* 62.260 uA */
+#define CURRENT_LSB_255                        61039 /* 61.039 uA */
 #define RESISTANCE_LSB                 110966 /* 11.0966 Ohm*/
 #define RESISTANCE_LSB_LOW             157216 /* 15.7216 Ohm*/
 
@@ -117,6 +120,7 @@ struct tps23861_data {
 static struct regmap_config tps23861_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
+       .max_register = 0x6f,
 };
 
 static int tps23861_read_temp(struct tps23861_data *data, long *val)
@@ -560,6 +564,15 @@ static int tps23861_probe(struct i2c_client *client)
        else
                data->shunt_resistor = SHUNT_RESISTOR_DEFAULT;
 
+       if (data->shunt_resistor == SHUNT_RESISTOR_DEFAULT)
+               regmap_clear_bits(data->regmap,
+                                 TPS23861_GENERAL_MASK_1,
+                                 TPS23861_CURRENT_SHUNT_MASK);
+       else
+               regmap_set_bits(data->regmap,
+                               TPS23861_GENERAL_MASK_1,
+                               TPS23861_CURRENT_SHUNT_MASK);
+
        hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
                                                         data, &tps23861_chip_info,
                                                         NULL);
index 281a65d..10acece 100644 (file)
@@ -647,7 +647,7 @@ config I2C_HIGHLANDER
 
 config I2C_HISI
        tristate "HiSilicon I2C controller"
-       depends on ARM64 || COMPILE_TEST
+       depends on (ARM64 && ACPI) || COMPILE_TEST
        help
          Say Y here if you want to have Hisilicon I2C controller support
          available on the Kunpeng Server.
index 4d12e3d..55a9e93 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  *     i2c-ali1563.c - i2c driver for the ALi 1563 Southbridge
  *
  *     Copyright (C) 2004 Patrick Mochel
index 7d62cbd..354cf7e 100644 (file)
@@ -55,7 +55,7 @@
 #define ALTR_I2C_XFER_TIMEOUT  (msecs_to_jiffies(250))
 
 /**
- * altr_i2c_dev - I2C device context
+ * struct altr_i2c_dev - I2C device context
  * @base: pointer to register struct
  * @msg: pointer to current message
  * @msg_len: number of bytes transferred in msg
@@ -172,7 +172,7 @@ static void altr_i2c_init(struct altr_i2c_dev *idev)
        altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false);
 }
 
-/**
+/*
  * altr_i2c_transfer - On the last byte to be transmitted, send
  * a Stop bit on the last byte.
  */
@@ -185,7 +185,7 @@ static void altr_i2c_transfer(struct altr_i2c_dev *idev, u32 data)
                writel(data, idev->base + ALTR_I2C_TFR_CMD);
 }
 
-/**
+/*
  * altr_i2c_empty_rx_fifo - Fetch data from RX FIFO until end of
  * transfer. Send a Stop bit on the last byte.
  */
@@ -201,9 +201,8 @@ static void altr_i2c_empty_rx_fifo(struct altr_i2c_dev *idev)
        }
 }
 
-/**
+/*
  * altr_i2c_fill_tx_fifo - Fill TX FIFO from current message buffer.
- * @return: Number of bytes left to transfer.
  */
 static int altr_i2c_fill_tx_fifo(struct altr_i2c_dev *idev)
 {
index c1bbc4c..66aafa7 100644 (file)
@@ -144,7 +144,7 @@ enum cdns_i2c_mode {
 };
 
 /**
- * enum cdns_i2c_slave_mode - Slave state when I2C is operating in slave mode
+ * enum cdns_i2c_slave_state - Slave state when I2C is operating in slave mode
  *
  * @CDNS_I2C_SLAVE_STATE_IDLE: I2C slave idle
  * @CDNS_I2C_SLAVE_STATE_SEND: I2C slave sending data to master
index 13be1d6..9b08bb5 100644 (file)
@@ -165,7 +165,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
 }
 
 /**
- * i2c_dw_init() - Initialize the designware I2C master hardware
+ * i2c_dw_init_master() - Initialize the designware I2C master hardware
  * @dev: device private data
  *
  * This function configures and enables the I2C master.
index 843b31a..321b277 100644 (file)
@@ -148,7 +148,7 @@ struct i2c_algo_pch_data {
 
 /**
  * struct adapter_info - This structure holds the adapter information for the
-                       PCH i2c controller
+ *                     PCH i2c controller
  * @pch_data:          stores a list of i2c_algo_pch_data
  * @pch_i2c_suspended: specifies whether the system is suspended or not
  *                     perhaps with more lines and words.
@@ -358,6 +358,7 @@ static void pch_i2c_repstart(struct i2c_algo_pch_data *adap)
 /**
  * pch_i2c_writebytes() - write data to I2C bus in normal mode
  * @i2c_adap:  Pointer to the struct i2c_adapter.
+ * @msgs:      Pointer to the i2c message structure.
  * @last:      specifies whether last message or not.
  *             In the case of compound mode it will be 1 for last message,
  *             otherwise 0.
index 99d4467..f9e1c2c 100644 (file)
@@ -395,11 +395,9 @@ static int i801_check_post(struct i801_priv *priv, int status)
                dev_err(&priv->pci_dev->dev, "Transaction timeout\n");
                /* try to stop the current command */
                dev_dbg(&priv->pci_dev->dev, "Terminating the current operation\n");
-               outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL,
-                      SMBHSTCNT(priv));
+               outb_p(SMBHSTCNT_KILL, SMBHSTCNT(priv));
                usleep_range(1000, 2000);
-               outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL),
-                      SMBHSTCNT(priv));
+               outb_p(0, SMBHSTCNT(priv));
 
                /* Check if it worked */
                status = inb_p(SMBHSTSTS(priv));
index c8c422e..5dae7ca 100644 (file)
@@ -123,7 +123,6 @@ static int icy_probe(struct zorro_dev *z,
 {
        struct icy_i2c *i2c;
        struct i2c_algo_pcf_data *algo_data;
-       struct fwnode_handle *new_fwnode;
        struct i2c_board_info ltc2990_info = {
                .type           = "ltc2990",
                .swnode         = &icy_ltc2990_node,
index 30d9e89..dcca9c2 100644 (file)
@@ -19,6 +19,7 @@
 
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/fsl_devices.h>
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
@@ -45,6 +46,7 @@
 #define CCR_MTX  0x10
 #define CCR_TXAK 0x08
 #define CCR_RSTA 0x04
+#define CCR_RSVD 0x02
 
 #define CSR_MCF  0x80
 #define CSR_MAAS 0x40
@@ -97,7 +99,7 @@ struct mpc_i2c {
        u32 block;
        int rc;
        int expect_rxack;
-
+       bool has_errata_A004447;
 };
 
 struct mpc_i2c_divider {
@@ -136,6 +138,75 @@ static void mpc_i2c_fixup(struct mpc_i2c *i2c)
        }
 }
 
+static int i2c_mpc_wait_sr(struct mpc_i2c *i2c, int mask)
+{
+       void __iomem *addr = i2c->base + MPC_I2C_SR;
+       u8 val;
+
+       return readb_poll_timeout(addr, val, val & mask, 0, 100);
+}
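
A usage note on the helper (ours): readb_poll_timeout() keeps re-reading the register into val until the condition holds or the timeout expires, returning 0 on success and -ETIMEDOUT otherwise:

        /* readb_poll_timeout(addr, val, cond, delay_us, timeout_us):
         * here delay_us = 0 (tight poll between reads) and
         * timeout_us = 100 (bound the total wait).
         */
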
+
+/*
+ * Workaround for Erratum A004447. From the P2040CE Rev Q
+ *
+ * 1.  Set up the frequency divider and sampling rate.
+ * 2.  I2CCR - a0h
+ * 3.  Poll for I2CSR[MBB] to get set.
+ * 4.  If I2CSR[MAL] is set (an indication that SDA is stuck low), then go to
+ *     step 5. If MAL is not set, then go to step 13.
+ * 5.  I2CCR - 00h
+ * 6.  I2CCR - 22h
+ * 7.  I2CCR - a2h
+ * 8.  Poll for I2CSR[MBB] to get set.
+ * 9.  Issue read to I2CDR.
+ * 10. Poll for I2CSR[MIF] to be set.
+ * 11. I2CCR - 82h
+ * 12. Workaround complete. Skip the next steps.
+ * 13. Issue read to I2CDR.
+ * 14. Poll for I2CSR[MIF] to be set.
+ * 15. I2CCR - 80h
+ */
+static void mpc_i2c_fixup_A004447(struct mpc_i2c *i2c)
+{
+       int ret;
+       u32 val;
+
+       writeccr(i2c, CCR_MEN | CCR_MSTA);
+       ret = i2c_mpc_wait_sr(i2c, CSR_MBB);
+       if (ret) {
+               dev_err(i2c->dev, "timeout waiting for CSR_MBB\n");
+               return;
+       }
+
+       val = readb(i2c->base + MPC_I2C_SR);
+
+       if (val & CSR_MAL) {
+               writeccr(i2c, 0x00);
+               writeccr(i2c, CCR_MSTA | CCR_RSVD);
+               writeccr(i2c, CCR_MEN | CCR_MSTA | CCR_RSVD);
+               ret = i2c_mpc_wait_sr(i2c, CSR_MBB);
+               if (ret) {
+                       dev_err(i2c->dev, "timeout waiting for CSR_MBB\n");
+                       return;
+               }
+               val = readb(i2c->base + MPC_I2C_DR);
+               ret = i2c_mpc_wait_sr(i2c, CSR_MIF);
+               if (ret) {
+                       dev_err(i2c->dev, "timeout waiting for CSR_MIF\n");
+                       return;
+               }
+               writeccr(i2c, CCR_MEN | CCR_RSVD);
+       } else {
+               val = readb(i2c->base + MPC_I2C_DR);
+               ret = i2c_mpc_wait_sr(i2c, CSR_MIF);
+               if (ret) {
+                       dev_err(i2c->dev, "timeout waiting for CSR_MIF\n");
+                       return;
+               }
+               writeccr(i2c, CCR_MEN);
+       }
+}
+
 #if defined(CONFIG_PPC_MPC52xx) || defined(CONFIG_PPC_MPC512x)
 static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = {
        {20, 0x20}, {22, 0x21}, {24, 0x22}, {26, 0x23},
@@ -670,7 +741,10 @@ static int fsl_i2c_bus_recovery(struct i2c_adapter *adap)
 {
        struct mpc_i2c *i2c = i2c_get_adapdata(adap);
 
-       mpc_i2c_fixup(i2c);
+       if (i2c->has_errata_A004447)
+               mpc_i2c_fixup_A004447(i2c);
+       else
+               mpc_i2c_fixup(i2c);
 
        return 0;
 }
@@ -767,6 +841,9 @@ static int fsl_i2c_probe(struct platform_device *op)
        }
        dev_info(i2c->dev, "timeout %u us\n", mpc_ops.timeout * 1000000 / HZ);
 
+       if (of_property_read_bool(op->dev.of_node, "fsl,i2c-erratum-a004447"))
+               i2c->has_errata_A004447 = true;
+
        i2c->adap = mpc_ops;
        scnprintf(i2c->adap.name, sizeof(i2c->adap.name),
                  "MPC adapter (%s)", of_node_full_name(op->dev.of_node));
index 5ddfa4e..4e9fb6b 100644 (file)
@@ -479,6 +479,11 @@ static void mtk_i2c_clock_disable(struct mtk_i2c *i2c)
 static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
 {
        u16 control_reg;
+       u16 intr_stat_reg;
+
+       mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_START);
+       intr_stat_reg = mtk_i2c_readw(i2c, OFFSET_INTR_STAT);
+       mtk_i2c_writew(i2c, intr_stat_reg, OFFSET_INTR_STAT);
 
        if (i2c->dev_comp->apdma_sync) {
                writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
index dc77e1c..a2d12a5 100644 (file)
@@ -159,7 +159,7 @@ struct i2c_nmk_client {
  * @clk_freq: clock frequency for the operation mode
  * @tft: Tx FIFO Threshold in bytes
  * @rft: Rx FIFO Threshold in bytes
- * @timeout Slave response timeout (ms)
+ * @timeout: Slave response timeout (ms)
  * @sm: speed mode
  * @stop: stop condition.
  * @xfer_complete: acknowledge completion for a I2C message.
index 273222e..a0af027 100644 (file)
@@ -250,7 +250,7 @@ static irqreturn_t ocores_isr(int irq, void *dev_id)
 }
 
 /**
- * Process timeout event
+ * ocores_process_timeout() - Process timeout event
  * @i2c: ocores I2C device instance
  */
 static void ocores_process_timeout(struct ocores_i2c *i2c)
@@ -264,7 +264,7 @@ static void ocores_process_timeout(struct ocores_i2c *i2c)
 }
 
 /**
- * Wait until something change in a given register
+ * ocores_wait() - Wait until something changes in a given register
  * @i2c: ocores I2C device instance
  * @reg: register to query
  * @mask: bitmask to apply on register value
@@ -296,7 +296,7 @@ static int ocores_wait(struct ocores_i2c *i2c,
 }
 
 /**
- * Wait until is possible to process some data
+ * ocores_poll_wait() - Wait until it is possible to process some data
  * @i2c: ocores I2C device instance
  *
  * Used when the device is in polling mode (interrupts disabled).
@@ -334,7 +334,7 @@ static int ocores_poll_wait(struct ocores_i2c *i2c)
 }
 
 /**
- * It handles an IRQ-less transfer
+ * ocores_process_polling() - Handle an IRQ-less transfer
  * @i2c: ocores I2C device instance
  *
  * Even if IRQs are disabled, the I2C OpenCore IP behavior is exactly the same
index 8c4ec7f..50f21cd 100644 (file)
@@ -138,7 +138,7 @@ static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data)
 /**
  * i2c_pnx_start - start a device
  * @slave_addr:                slave address
- * @adap:              pointer to adapter structure
+ * @alg_data:          pointer to local driver data structure
  *
  * Generate a START signal in the desired mode.
  */
@@ -194,7 +194,7 @@ static int i2c_pnx_start(unsigned char slave_addr,
 
 /**
  * i2c_pnx_stop - stop a device
- * @adap:              pointer to I2C adapter structure
+ * @alg_data:          pointer to local driver data structure
  *
  * Generate a STOP signal to terminate the master transaction.
  */
@@ -223,7 +223,7 @@ static void i2c_pnx_stop(struct i2c_pnx_algo_data *alg_data)
 
 /**
  * i2c_pnx_master_xmit - transmit data to slave
- * @adap:              pointer to I2C adapter structure
+ * @alg_data:          pointer to local driver data structure
  *
  * Sends one byte of data to the slave
  */
@@ -293,7 +293,7 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
 
 /**
  * i2c_pnx_master_rcv - receive data from slave
- * @adap:              pointer to I2C adapter structure
+ * @alg_data:          pointer to local driver data structure
  *
  * Reads one byte data from the slave
  */
index 214b4c9..6d635a7 100644 (file)
@@ -100,7 +100,7 @@ static const struct geni_i2c_err_log gi2c_log[] = {
        [GP_IRQ0] = {-EIO, "Unknown I2C err GP_IRQ0"},
        [NACK] = {-ENXIO, "NACK: slv unresponsive, check its power/reset-ln"},
        [GP_IRQ2] = {-EIO, "Unknown I2C err GP IRQ2"},
-       [BUS_PROTO] = {-EPROTO, "Bus proto err, noisy/unepxected start/stop"},
+       [BUS_PROTO] = {-EPROTO, "Bus proto err, noisy/unexpected start/stop"},
        [ARB_LOST] = {-EAGAIN, "Bus arbitration lost, clock line undriveable"},
        [GP_IRQ5] = {-EIO, "Unknown I2C err GP IRQ5"},
        [GENI_OVERRUN] = {-EIO, "Cmd overrun, check GENI cmd-state machine"},
@@ -650,6 +650,14 @@ static int geni_i2c_remove(struct platform_device *pdev)
        return 0;
 }
 
+static void geni_i2c_shutdown(struct platform_device *pdev)
+{
+       struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
+
+       /* Make client i2c transfers start failing */
+       i2c_mark_adapter_suspended(&gi2c->adap);
+}
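
What marking the adapter suspended buys here (our note): the I2C core then rejects client transfers immediately rather than letting them race with a device that is shutting down.

        /* In the core (our paraphrase): i2c_transfer() on an adapter
         * marked suspended fails fast with -ESHUTDOWN instead of
         * reaching the bus.
         */
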
+
 static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
 {
        int ret;
@@ -690,6 +698,8 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
 {
        struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
 
+       i2c_mark_adapter_suspended(&gi2c->adap);
+
        if (!gi2c->suspended) {
                geni_i2c_runtime_suspend(dev);
                pm_runtime_disable(dev);
@@ -699,8 +709,16 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
        return 0;
 }
 
+static int __maybe_unused geni_i2c_resume_noirq(struct device *dev)
+{
+       struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+
+       i2c_mark_adapter_resumed(&gi2c->adap);
+       return 0;
+}
+
 static const struct dev_pm_ops geni_i2c_pm_ops = {
-       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, NULL)
+       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, geni_i2c_resume_noirq)
        SET_RUNTIME_PM_OPS(geni_i2c_runtime_suspend, geni_i2c_runtime_resume,
                                                                        NULL)
 };
@@ -714,6 +732,7 @@ MODULE_DEVICE_TABLE(of, geni_i2c_dt_match);
 static struct platform_driver geni_i2c_driver = {
        .probe  = geni_i2c_probe,
        .remove = geni_i2c_remove,
+       .shutdown = geni_i2c_shutdown,
        .driver = {
                .name = "geni_i2c",
                .pm = &geni_i2c_pm_ops,
index ab92861..4d82761 100644 (file)
@@ -480,7 +480,10 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
                                         * forces us to send a new START
                                         * when we change direction
                                         */
+                                       dev_dbg(i2c->dev,
+                                               "missing START before write->read\n");
                                        s3c24xx_i2c_stop(i2c, -EINVAL);
+                                       break;
                                }
 
                                goto retry_write;
index 3ae6ca2..2d2e630 100644 (file)
@@ -807,7 +807,7 @@ static const struct sh_mobile_dt_config r8a7740_dt_config = {
 static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
        { .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config },
        { .compatible = "renesas,iic-r8a7740", .data = &r8a7740_dt_config },
-       { .compatible = "renesas,iic-r8a774c0", .data = &fast_clock_dt_config },
+       { .compatible = "renesas,iic-r8a774c0", .data = &v2_freq_calc_dt_config },
        { .compatible = "renesas,iic-r8a7790", .data = &v2_freq_calc_dt_config },
        { .compatible = "renesas,iic-r8a7791", .data = &v2_freq_calc_dt_config },
        { .compatible = "renesas,iic-r8a7792", .data = &v2_freq_calc_dt_config },
index faa81a9..8848231 100644 (file)
@@ -524,7 +524,7 @@ static void st_i2c_handle_write(struct st_i2c_dev *i2c_dev)
 }
 
 /**
- * st_i2c_handle_write() - Handle FIFO enmpty interrupt in case of read
+ * st_i2c_handle_read() - Handle FIFO empty interrupt in case of read
  * @i2c_dev: Controller's private data
  */
 static void st_i2c_handle_read(struct st_i2c_dev *i2c_dev)
@@ -558,7 +558,7 @@ static void st_i2c_handle_read(struct st_i2c_dev *i2c_dev)
 }
 
 /**
- * st_i2c_isr() - Interrupt routine
+ * st_i2c_isr_thread() - Interrupt routine
  * @irq: interrupt number
  * @data: Controller's private data
  */
index 4933fc8..eebce7e 100644 (file)
@@ -313,7 +313,7 @@ static int stm32f4_i2c_wait_free_bus(struct stm32f4_i2c_dev *i2c_dev)
 }
 
 /**
- * stm32f4_i2c_write_ byte() - Write a byte in the data register
+ * stm32f4_i2c_write_byte() - Write a byte in the data register
  * @i2c_dev: Controller's private data
  * @byte: Data to write in the register
  */
index 3680d60..ec0c7ca 100644 (file)
@@ -65,7 +65,7 @@ static void tegra_bpmp_xlate_flags(u16 flags, u16 *out)
                *out |= SERIALI2C_RECV_LEN;
 }
 
-/**
+/*
  * The serialized I2C format is simply the following:
  * [addr little-endian][flags little-endian][len little-endian][data if write]
  * [addr little-endian][flags little-endian][len little-endian][data if write]
@@ -109,7 +109,7 @@ static void tegra_bpmp_serialize_i2c_msg(struct tegra_bpmp_i2c *i2c,
        request->xfer.data_size = pos;
 }
 
-/**
+/*
  * The data in the BPMP -> CPU direction is composed of sequential blocks for
  * those messages that have I2C_M_RD. So, for example, if you have:
  *
index 6dc8890..1c78657 100644 (file)
@@ -34,7 +34,7 @@ struct i2c_arbitrator_data {
 };
 
 
-/**
+/*
  * i2c_arbitrator_select - claim the I2C bus
  *
  * Use the GPIO-based signalling protocol; return -EBUSY if we fail.
@@ -77,7 +77,7 @@ static int i2c_arbitrator_select(struct i2c_mux_core *muxc, u32 chan)
        return -EBUSY;
 }
 
-/**
+/*
  * i2c_arbitrator_deselect - release the I2C bus
  *
  * Release the I2C bus using the GPIO-based signalling protocol.
index 9d3952b..a27db78 100644 (file)
@@ -771,6 +771,13 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev,
                if (ret)
                        goto err;
 
+               if (channel >= indio_dev->num_channels) {
+                       dev_err(indio_dev->dev.parent,
+                               "Channel index >= number of channels\n");
+                       ret = -EINVAL;
+                       goto err;
+               }
+
                ret = of_property_read_u32_array(child, "diff-channels",
                                                 ain, 2);
                if (ret)
@@ -850,6 +857,11 @@ static int ad7124_setup(struct ad7124_state *st)
        return ret;
 }
 
+static void ad7124_reg_disable(void *r)
+{
+       regulator_disable(r);
+}
+
 static int ad7124_probe(struct spi_device *spi)
 {
        const struct ad7124_chip_info *info;
@@ -895,17 +907,20 @@ static int ad7124_probe(struct spi_device *spi)
                ret = regulator_enable(st->vref[i]);
                if (ret)
                        return ret;
+
+               ret = devm_add_action_or_reset(&spi->dev, ad7124_reg_disable,
+                                              st->vref[i]);
+               if (ret)
+                       return ret;
        }
 
        st->mclk = devm_clk_get(&spi->dev, "mclk");
-       if (IS_ERR(st->mclk)) {
-               ret = PTR_ERR(st->mclk);
-               goto error_regulator_disable;
-       }
+       if (IS_ERR(st->mclk))
+               return PTR_ERR(st->mclk);
 
        ret = clk_prepare_enable(st->mclk);
        if (ret < 0)
-               goto error_regulator_disable;
+               return ret;
 
        ret = ad7124_soft_reset(st);
        if (ret < 0)
@@ -935,11 +950,6 @@ error_remove_trigger:
        ad_sd_cleanup_buffer_and_trigger(indio_dev);
 error_clk_disable_unprepare:
        clk_disable_unprepare(st->mclk);
-error_regulator_disable:
-       for (i = ARRAY_SIZE(st->vref) - 1; i >= 0; i--) {
-               if (!IS_ERR_OR_NULL(st->vref[i]))
-                       regulator_disable(st->vref[i]);
-       }
 
        return ret;
 }
@@ -948,17 +958,11 @@ static int ad7124_remove(struct spi_device *spi)
 {
        struct iio_dev *indio_dev = spi_get_drvdata(spi);
        struct ad7124_state *st = iio_priv(indio_dev);
-       int i;
 
        iio_device_unregister(indio_dev);
        ad_sd_cleanup_buffer_and_trigger(indio_dev);
        clk_disable_unprepare(st->mclk);
 
-       for (i = ARRAY_SIZE(st->vref) - 1; i >= 0; i--) {
-               if (!IS_ERR_OR_NULL(st->vref[i]))
-                       regulator_disable(st->vref[i]);
-       }
-
        return 0;
 }
 
index 2ed5805..1141cc1 100644 (file)
@@ -912,7 +912,7 @@ static int ad7192_probe(struct spi_device *spi)
 {
        struct ad7192_state *st;
        struct iio_dev *indio_dev;
-       int ret, voltage_uv = 0;
+       int ret;
 
        if (!spi->irq) {
                dev_err(&spi->dev, "no IRQ?\n");
@@ -949,15 +949,12 @@ static int ad7192_probe(struct spi_device *spi)
                goto error_disable_avdd;
        }
 
-       voltage_uv = regulator_get_voltage(st->avdd);
-
-       if (voltage_uv > 0) {
-               st->int_vref_mv = voltage_uv / 1000;
-       } else {
-               ret = voltage_uv;
+       ret = regulator_get_voltage(st->avdd);
+       if (ret < 0) {
                dev_err(&spi->dev, "Device tree error, reference voltage undefined\n");
                goto error_disable_avdd;
        }
+       st->int_vref_mv = ret / 1000;
 
        spi_set_drvdata(spi, indio_dev);
        st->chip_info = of_device_get_match_data(&spi->dev);
@@ -1014,7 +1011,9 @@ static int ad7192_probe(struct spi_device *spi)
        return 0;
 
 error_disable_clk:
-       clk_disable_unprepare(st->mclk);
+       if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
+           st->clock_sel == AD7192_CLK_EXT_MCLK2)
+               clk_disable_unprepare(st->mclk);
 error_remove_trigger:
        ad_sd_cleanup_buffer_and_trigger(indio_dev);
 error_disable_dvdd:
@@ -1031,7 +1030,9 @@ static int ad7192_remove(struct spi_device *spi)
        struct ad7192_state *st = iio_priv(indio_dev);
 
        iio_device_unregister(indio_dev);
-       clk_disable_unprepare(st->mclk);
+       if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
+           st->clock_sel == AD7192_CLK_EXT_MCLK2)
+               clk_disable_unprepare(st->mclk);
        ad_sd_cleanup_buffer_and_trigger(indio_dev);
 
        regulator_disable(st->dvdd);
index c945f13..60f21fe 100644 (file)
@@ -167,6 +167,10 @@ struct ad7768_state {
         * transfer buffers to live in their own cache lines.
         */
        union {
+               struct {
+                       __be32 chan;
+                       s64 timestamp;
+               } scan;
                __be32 d32;
                u8 d8[2];
        } data ____cacheline_aligned;
@@ -469,11 +473,11 @@ static irqreturn_t ad7768_trigger_handler(int irq, void *p)
 
        mutex_lock(&st->lock);
 
-       ret = spi_read(st->spi, &st->data.d32, 3);
+       ret = spi_read(st->spi, &st->data.scan.chan, 3);
        if (ret < 0)
                goto err_unlock;
 
-       iio_push_to_buffers_with_timestamp(indio_dev, &st->data.d32,
+       iio_push_to_buffers_with_timestamp(indio_dev, &st->data.scan,
                                           iio_get_time_ns(indio_dev));
 
        iio_trigger_notify_done(indio_dev->trig);
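
The rule driving the new scan struct (our summary): iio_push_to_buffers_with_timestamp() stores the s64 timestamp inside the sample buffer itself, at an 8-byte-aligned offset at its end, so the buffer must reserve room for it rather than being just the raw 3-byte sample:

        /* Hypothetical layout on a 64-bit build: 4-byte sample slot,
         * 4 bytes of padding, then the 8-byte timestamp -- 16 bytes total.
         */
        struct {
                __be32 chan;
                s64 timestamp; /* naturally 8-byte aligned here */
        } scan;
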
index 5e980a0..440ef4c 100644 (file)
@@ -279,6 +279,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
        id &= AD7793_ID_MASK;
 
        if (id != st->chip_info->id) {
+               ret = -ENODEV;
                dev_err(&st->sd.spi->dev, "device ID query failed\n");
                goto out;
        }
index 9a64974..069b561 100644 (file)
@@ -59,8 +59,10 @@ struct ad7923_state {
        /*
         * DMA (thus cache coherency maintenance) requires the
         * transfer buffers to live in their own cache lines.
+        * Ensure rx_buf can be used directly in iio_push_to_buffers_with_timestamp().
+        * Length = 8 channels + 4 extra __be16 slots for the 8-byte timestamp.
         */
-       __be16                          rx_buf[4] ____cacheline_aligned;
+       __be16                          rx_buf[12] ____cacheline_aligned;
        __be16                          tx_buf[4];
 };
 
index 7ab2ccf..8107f7b 100644 (file)
@@ -524,23 +524,29 @@ static int ad5770r_channel_config(struct ad5770r_state *st)
        device_for_each_child_node(&st->spi->dev, child) {
                ret = fwnode_property_read_u32(child, "num", &num);
                if (ret)
-                       return ret;
-               if (num >= AD5770R_MAX_CHANNELS)
-                       return -EINVAL;
+                       goto err_child_out;
+               if (num >= AD5770R_MAX_CHANNELS) {
+                       ret = -EINVAL;
+                       goto err_child_out;
+               }
 
                ret = fwnode_property_read_u32_array(child,
                                                     "adi,range-microamp",
                                                     tmp, 2);
                if (ret)
-                       return ret;
+                       goto err_child_out;
 
                min = tmp[0] / 1000;
                max = tmp[1] / 1000;
                ret = ad5770r_store_output_range(st, min, max, num);
                if (ret)
-                       return ret;
+                       goto err_child_out;
        }
 
+       return 0;
+
+err_child_out:
+       fwnode_handle_put(child);
        return ret;
 }
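
The pattern being enforced above (our sketch, with a hypothetical do_something() helper): device_for_each_child_node() holds a reference on the child node it hands to each iteration, so any early exit from the loop must drop the current child's reference:

        device_for_each_child_node(dev, child) {
                ret = do_something(child);        /* hypothetical */
                if (ret) {
                        fwnode_handle_put(child); /* drop the iterator's ref */
                        return ret;
                }
        }
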
 
index 1a20c6b..645461c 100644 (file)
@@ -399,6 +399,7 @@ static int fxas21002c_temp_get(struct fxas21002c_data *data, int *val)
        ret = regmap_field_read(data->regmap_fields[F_TEMP], &temp);
        if (ret < 0) {
                dev_err(dev, "failed to read temp: %d\n", ret);
+               fxas21002c_pm_put(data);
                goto data_unlock;
        }
 
@@ -432,6 +433,7 @@ static int fxas21002c_axis_get(struct fxas21002c_data *data,
                               &axis_be, sizeof(axis_be));
        if (ret < 0) {
                dev_err(dev, "failed to read axis: %d: %d\n", index, ret);
+               fxas21002c_pm_put(data);
                goto data_unlock;
        }
 
index d5e15a8..64e4be1 100644 (file)
@@ -3248,6 +3248,11 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
                goto err_free_attr;
        }
 
+       if (!rdma_is_port_valid(uobj->context->device, cmd.flow_attr.port)) {
+               err = -EINVAL;
+               goto err_uobj;
+       }
+
        qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
        if (!qp) {
                err = -EINVAL;
index 22898d9..230a6ae 100644 (file)
@@ -581,12 +581,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
        props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
        props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;
 
-       if (!mlx4_is_slave(dev->dev))
-               err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
-
        if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
                resp.response_length += sizeof(resp.hca_core_clock_offset);
-               if (!err && !mlx4_is_slave(dev->dev)) {
+               if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) {
                        resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
                        resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
                }
@@ -1702,9 +1699,6 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
        struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
        int is_bonded = mlx4_is_bonded(dev);
 
-       if (!rdma_is_port_valid(qp->device, flow_attr->port))
-               return ERR_PTR(-EINVAL);
-
        if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
                return ERR_PTR(-EOPNOTSUPP);
 
index eb92cef..9ce01f7 100644 (file)
@@ -849,15 +849,14 @@ static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
        ib_umem_release(cq->buf.umem);
 }
 
-static void init_cq_frag_buf(struct mlx5_ib_cq *cq,
-                            struct mlx5_ib_cq_buf *buf)
+static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
 {
        int i;
        void *cqe;
        struct mlx5_cqe64 *cqe64;
 
        for (i = 0; i < buf->nent; i++) {
-               cqe = get_cqe(cq, i);
+               cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
                cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
                cqe64->op_own = MLX5_CQE_INVALID << 4;
        }
@@ -883,7 +882,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
        if (err)
                goto err_db;
 
-       init_cq_frag_buf(cq, &cq->buf);
+       init_cq_frag_buf(&cq->buf);
 
        *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
@@ -1184,7 +1183,7 @@ static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
        if (err)
                goto ex;
 
-       init_cq_frag_buf(cq, cq->resize_buf);
+       init_cq_frag_buf(cq->resize_buf);
 
        return 0;
 
index 61475b5..7af4df7 100644 (file)
@@ -41,6 +41,7 @@ struct mlx5_ib_user_db_page {
        struct ib_umem         *umem;
        unsigned long           user_virt;
        int                     refcnt;
+       struct mm_struct        *mm;
 };
 
 int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
@@ -53,7 +54,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
        mutex_lock(&context->db_page_mutex);
 
        list_for_each_entry(page, &context->db_page_list, list)
-               if (page->user_virt == (virt & PAGE_MASK))
+               if ((current->mm == page->mm) &&
+                   (page->user_virt == (virt & PAGE_MASK)))
                        goto found;
 
        page = kmalloc(sizeof(*page), GFP_KERNEL);
@@ -71,6 +73,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
                kfree(page);
                goto out;
        }
+       mmgrab(current->mm);
+       page->mm = current->mm;
 
        list_add(&page->list, &context->db_page_list);
 
@@ -91,6 +95,7 @@ void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
 
        if (!--db->u.user_page->refcnt) {
                list_del(&db->u.user_page->list);
+               mmdrop(db->u.user_page->mm);
                ib_umem_release(db->u.user_page->umem);
                kfree(db->u.user_page);
        }
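
Why mmgrab()/mmdrop() rather than mmget()/mmput() (our note): the driver only needs the mm_struct pointer to stay valid so the current->mm comparison above is safe; it does not need the address space kept alive.

        /* mmgrab() pins mm_count (struct lifetime); mmget() would pin
         * mm_users, i.e. the whole address space, which is unnecessary
         * for a pointer identity check.
         */
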
index 941adf5..5fbc0a8 100644 (file)
@@ -1194,9 +1194,8 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
                goto free_ucmd;
        }
 
-       if (flow_attr->port > dev->num_ports ||
-           (flow_attr->flags &
-            ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS))) {
+       if (flow_attr->flags &
+           ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS)) {
                err = -EINVAL;
                goto free_ucmd;
        }
@@ -2134,6 +2133,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
        if (err)
                goto end;
 
+       if (obj->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
+           mlx5_eswitch_mode(dev->mdev) != MLX5_ESWITCH_OFFLOADS) {
+               err = -EINVAL;
+               goto end;
+       }
+
        uobj->object = obj;
        obj->mdev = dev->mdev;
        atomic_set(&obj->usecnt, 0);
index 9662cd3..425423d 100644 (file)
@@ -1940,8 +1940,8 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
                mlx5r_deref_wait_odp_mkey(&mr->mmkey);
 
        if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
-               xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), ibmr,
-                          NULL, GFP_KERNEL);
+               xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
+                          mr->sig, NULL, GFP_KERNEL);
 
                if (mr->mtt_mr) {
                        rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
index d5a90a6..5b05cf3 100644 (file)
@@ -163,6 +163,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
 
 static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
        .kind           = "ipoib",
+       .netns_refund   = true,
        .maxtype        = IFLA_IPOIB_MAX,
        .policy         = ipoib_policy,
        .priv_size      = sizeof(struct ipoib_dev_priv),
index d1591a2..8f385f9 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  */
 
 #include <asm/div64.h>
@@ -205,6 +205,7 @@ struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name)
        }
        mutex_unlock(&bcm_voter_lock);
 
+       of_node_put(node);
        return voter;
 }
 EXPORT_SYMBOL_GPL(of_bcm_voter_get);
@@ -362,6 +363,7 @@ static const struct of_device_id bcm_voter_of_match[] = {
        { .compatible = "qcom,bcm-voter" },
        { }
 };
+MODULE_DEVICE_TABLE(of, bcm_voter_of_match);
 
 static struct platform_driver qcom_icc_bcm_voter_driver = {
        .probe = qcom_icc_bcm_voter_probe,
index 80e8e19..3ac42bb 100644 (file)
@@ -884,7 +884,7 @@ static inline u64 build_inv_address(u64 address, size_t size)
                 * The msb-bit must be clear on the address. Just set all the
                 * lower bits.
                 */
-               address |= 1ull << (msb_diff - 1);
+               address |= (1ull << msb_diff) - 1;
        }
 
        /* Clear bits 11:0 */
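
A quick worked check of the fix (ours): the goal is to set every bit below the msb, not a single bit.

        /* msb_diff = 3:
         *   old: 1ull << (3 - 1) == 0b100  (only bit 2)
         *   new: (1ull << 3) - 1 == 0b111  (bits 2..0, as intended)
         */
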
@@ -1714,6 +1714,8 @@ static void amd_iommu_probe_finalize(struct device *dev)
        domain = iommu_get_domain_for_dev(dev);
        if (domain->type == IOMMU_DOMAIN_DMA)
                iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
+       else
+               set_dma_ops(dev, NULL);
 }
 
 static void amd_iommu_release_device(struct device *dev)
index 1757ac1..84057cb 100644 (file)
@@ -1142,7 +1142,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
 
                err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
                if (err)
-                       goto err_unmap;
+                       goto err_sysfs;
        }
 
        drhd->iommu = iommu;
@@ -1150,6 +1150,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
 
        return 0;
 
+err_sysfs:
+       iommu_device_sysfs_remove(&iommu->iommu);
 err_unmap:
        unmap_iommu(iommu);
 error_free_seq_id:
index 708f430..be35284 100644 (file)
@@ -2525,9 +2525,9 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
                                    struct device *dev,
                                    u32 pasid)
 {
-       int flags = PASID_FLAG_SUPERVISOR_MODE;
        struct dma_pte *pgd = domain->pgd;
        int agaw, level;
+       int flags = 0;
 
        /*
         * Skip top levels of page tables for iommu which has
@@ -2543,7 +2543,10 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
        if (level != 4 && level != 5)
                return -EINVAL;
 
-       flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
+       if (pasid != PASID_RID2PASID)
+               flags |= PASID_FLAG_SUPERVISOR_MODE;
+       if (level == 5)
+               flags |= PASID_FLAG_FL5LP;
 
        if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
                flags |= PASID_FLAG_PAGE_SNOOP;
@@ -4606,6 +4609,8 @@ static int auxiliary_link_device(struct dmar_domain *domain,
 
        if (!sinfo) {
                sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
+               if (!sinfo)
+                       return -ENOMEM;
                sinfo->domain = domain;
                sinfo->pdev = dev;
                list_add(&sinfo->link_phys, &info->subdevices);
index 72646ba..72dc848 100644 (file)
@@ -699,7 +699,8 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
         * Since it is a second level only translation setup, we should
         * set SRE bit as well (addresses are expected to be GPAs).
         */
-       pasid_set_sre(pte);
+       if (pasid != PASID_RID2PASID)
+               pasid_set_sre(pte);
        pasid_set_present(pte);
        pasid_flush_caches(iommu, pte, pasid, did);
 
index 7c02481..c6e5ee4 100644 (file)
@@ -1136,6 +1136,7 @@ static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
        { 0 },
 };
+MODULE_DEVICE_TABLE(virtio, id_table);
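
What the added line fixes (our note): without MODULE_DEVICE_TABLE() the module exports no match aliases, so userspace cannot autoload it when a matching virtio device appears.

        /* Rough effect (our paraphrase): export an alias such as
         * "virtio:d00000017v*" (VIRTIO_ID_IOMMU == 23) that modprobe
         * maps back to this module.
         */
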
 
 static struct virtio_driver virtio_iommu_drv = {
        .driver.name            = KBUILD_MODNAME,
index 0a4551e..5fc989a 100644 (file)
@@ -364,7 +364,6 @@ struct cached_dev {
 
        /* The rest of this all shows up in sysfs */
        unsigned int            sequential_cutoff;
-       unsigned int            readahead;
 
        unsigned int            io_disable:1;
        unsigned int            verify:1;
index 29c2317..6d1de88 100644 (file)
@@ -880,9 +880,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
                                 struct bio *bio, unsigned int sectors)
 {
        int ret = MAP_CONTINUE;
-       unsigned int reada = 0;
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
        struct bio *miss, *cache_bio;
+       unsigned int size_limit;
 
        s->cache_missed = 1;
 
@@ -892,14 +892,10 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
                goto out_submit;
        }
 
-       if (!(bio->bi_opf & REQ_RAHEAD) &&
-           !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
-           s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
-               reada = min_t(sector_t, dc->readahead >> 9,
-                             get_capacity(bio->bi_bdev->bd_disk) -
-                             bio_end_sector(bio));
-
-       s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
+       /* Limit to the maximum valid replace key size and cache_bio bvec count */
+       size_limit = min_t(unsigned int, BIO_MAX_VECS * PAGE_SECTORS,
+                          (1 << KEY_SIZE_BITS) - 1);
+       s->insert_bio_sectors = min3(size_limit, sectors, bio_sectors(bio));
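
        /* For scale (our arithmetic, assuming 4 KiB pages):
         * BIO_MAX_VECS * PAGE_SECTORS = 256 * 8 = 2048 sectors (1 MiB),
         * well under the (1 << KEY_SIZE_BITS) - 1 = 65535-sector key
         * ceiling, so the bvec bound is normally the binding one.
         */
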
 
        s->iop.replace_key = KEY(s->iop.inode,
                                 bio->bi_iter.bi_sector + s->insert_bio_sectors,
@@ -911,7 +907,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 
        s->iop.replace = true;
 
-       miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
+       miss = bio_next_split(bio, s->insert_bio_sectors, GFP_NOIO,
+                             &s->d->bio_split);
 
        /* btree_search_recurse()'s btree iterator is no good anymore */
        ret = miss == bio ? MAP_DONE : -EINTR;
@@ -933,9 +930,6 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
        if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
                goto out_put;
 
-       if (reada)
-               bch_mark_cache_readahead(s->iop.c, s->d);
-
        s->cache_miss   = miss;
        s->iop.bio      = cache_bio;
        bio_get(cache_bio);
index 503aafe..4c7ee5f 100644 (file)
@@ -46,7 +46,6 @@ read_attribute(cache_misses);
 read_attribute(cache_bypass_hits);
 read_attribute(cache_bypass_misses);
 read_attribute(cache_hit_ratio);
-read_attribute(cache_readaheads);
 read_attribute(cache_miss_collisions);
 read_attribute(bypassed);
 
@@ -64,7 +63,6 @@ SHOW(bch_stats)
                    DIV_SAFE(var(cache_hits) * 100,
                             var(cache_hits) + var(cache_misses)));
 
-       var_print(cache_readaheads);
        var_print(cache_miss_collisions);
        sysfs_hprint(bypassed,  var(sectors_bypassed) << 9);
 #undef var
@@ -86,7 +84,6 @@ static struct attribute *bch_stats_files[] = {
        &sysfs_cache_bypass_hits,
        &sysfs_cache_bypass_misses,
        &sysfs_cache_hit_ratio,
-       &sysfs_cache_readaheads,
        &sysfs_cache_miss_collisions,
        &sysfs_bypassed,
        NULL
@@ -113,7 +110,6 @@ void bch_cache_accounting_clear(struct cache_accounting *acc)
        acc->total.cache_misses = 0;
        acc->total.cache_bypass_hits = 0;
        acc->total.cache_bypass_misses = 0;
-       acc->total.cache_readaheads = 0;
        acc->total.cache_miss_collisions = 0;
        acc->total.sectors_bypassed = 0;
 }
@@ -145,7 +141,6 @@ static void scale_stats(struct cache_stats *stats, unsigned long rescale_at)
                scale_stat(&stats->cache_misses);
                scale_stat(&stats->cache_bypass_hits);
                scale_stat(&stats->cache_bypass_misses);
-               scale_stat(&stats->cache_readaheads);
                scale_stat(&stats->cache_miss_collisions);
                scale_stat(&stats->sectors_bypassed);
        }
@@ -168,7 +163,6 @@ static void scale_accounting(struct timer_list *t)
        move_stat(cache_misses);
        move_stat(cache_bypass_hits);
        move_stat(cache_bypass_misses);
-       move_stat(cache_readaheads);
        move_stat(cache_miss_collisions);
        move_stat(sectors_bypassed);
 
@@ -209,14 +203,6 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
        mark_cache_stats(&c->accounting.collector, hit, bypass);
 }
 
-void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
-{
-       struct cached_dev *dc = container_of(d, struct cached_dev, disk);
-
-       atomic_inc(&dc->accounting.collector.cache_readaheads);
-       atomic_inc(&c->accounting.collector.cache_readaheads);
-}
-
 void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
 {
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
index abfaabf..ca4f435 100644 (file)
@@ -7,7 +7,6 @@ struct cache_stat_collector {
        atomic_t cache_misses;
        atomic_t cache_bypass_hits;
        atomic_t cache_bypass_misses;
-       atomic_t cache_readaheads;
        atomic_t cache_miss_collisions;
        atomic_t sectors_bypassed;
 };
index cc89f31..05ac1d6 100644 (file)
@@ -137,7 +137,6 @@ rw_attribute(io_disable);
 rw_attribute(discard);
 rw_attribute(running);
 rw_attribute(label);
-rw_attribute(readahead);
 rw_attribute(errors);
 rw_attribute(io_error_limit);
 rw_attribute(io_error_halflife);
@@ -260,7 +259,6 @@ SHOW(__bch_cached_dev)
        var_printf(partial_stripes_expensive,   "%u");
 
        var_hprint(sequential_cutoff);
-       var_hprint(readahead);
 
        sysfs_print(running,            atomic_read(&dc->running));
        sysfs_print(state,              states[BDEV_STATE(&dc->sb)]);
@@ -365,7 +363,6 @@ STORE(__cached_dev)
        sysfs_strtoul_clamp(sequential_cutoff,
                            dc->sequential_cutoff,
                            0, UINT_MAX);
-       d_strtoi_h(readahead);
 
        if (attr == &sysfs_clear_stats)
                bch_cache_accounting_clear(&dc->accounting);
@@ -538,7 +535,6 @@ static struct attribute *bch_cached_dev_files[] = {
        &sysfs_running,
        &sysfs_state,
        &sysfs_label,
-       &sysfs_readahead,
 #ifdef CONFIG_BCACHE_DEBUG
        &sysfs_verify,
        &sysfs_bypass_torture_test,
index b8e4d31..751ec5e 100644 (file)
@@ -859,7 +859,8 @@ static uint32_t __minimum_chunk_size(struct origin *o)
 
        if (o)
                list_for_each_entry(snap, &o->snapshots, list)
-                       chunk_size = min(chunk_size, snap->store->chunk_size);
+                       chunk_size = min_not_zero(chunk_size,
+                                                 snap->store->chunk_size);
 
        return (uint32_t) chunk_size;
 }
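
The switch from min() to min_not_zero() matters because a chunk size can legitimately be zero here (a store whose chunk size is not set); plain min() would then pin the computed minimum to zero. The kernel macro is roughly the following (sketch after <linux/minmax.h>):

        /* Smaller of two values, except that a zero operand is treated
         * as "unset" and never wins. */
        #define min_not_zero(x, y) ({                                   \
                typeof(x) __x = (x);                                    \
                typeof(y) __y = (y);                                    \
                __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })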
index 29385dc..db61a1f 100644 (file)
@@ -15,7 +15,7 @@
 #define DM_VERITY_VERIFY_ERR(s) DM_VERITY_ROOT_HASH_VERIFICATION " " s
 
 static bool require_signatures;
-module_param(require_signatures, bool, false);
+module_param(require_signatures, bool, 0444);
 MODULE_PARM_DESC(require_signatures,
                "Verify the roothash of dm-verity hash tree");
 
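This is an API-misuse fix: the third argument of module_param() is a sysfs permission mode, not a default value, so `false` (i.e. 0) silently created no sysfs entry at all, while 0444 exposes the parameter read-only. Generic usage (my_flag is a hypothetical parameter, not from this driver):

        #include <linux/moduleparam.h>

        static bool my_flag;
        /* perm 0444: readable at /sys/module/<module>/parameters/my_flag;
         * perm 0 would keep the parameter out of sysfs entirely. */
        module_param(my_flag, bool, 0444);
        MODULE_PARM_DESC(my_flag, "example read-only parameter");
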
index 841e1c1..7d4ff8a 100644 (file)
@@ -5311,8 +5311,6 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
        unsigned int chunk_sectors;
        unsigned int bio_sectors = bio_sectors(bio);
 
-       WARN_ON_ONCE(bio->bi_bdev->bd_partno);
-
        chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
        return  chunk_sectors >=
                ((sector & (chunk_sectors - 1)) + bio_sectors);
index a07674e..4c5621b 100644 (file)
@@ -468,6 +468,7 @@ static void rtl8411_init_common_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_CFG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(23, 7, 14);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(4, 3, 10);
        pcr->ic_version = rtl8411_get_ic_version(pcr);
index 39a6a7e..29f5414 100644 (file)
@@ -255,6 +255,7 @@ void rts5209_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_CFG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 16);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
 
index 8200af2..4bcfbc9 100644 (file)
@@ -358,6 +358,7 @@ void rts5227_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_CFG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 7, 7);
 
@@ -483,6 +484,7 @@ void rts522a_init_params(struct rtsx_pcr *pcr)
 
        rts5227_init_params(pcr);
        pcr->ops = &rts522a_pcr_ops;
+       pcr->aspm_mode = ASPM_MODE_REG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11);
        pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
 
index 781a86d..ffc1282 100644 (file)
@@ -718,6 +718,7 @@ void rts5228_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_REG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(28, 27, 11);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
 
index 89e6f12..c748eaf 100644 (file)
@@ -246,6 +246,7 @@ void rts5229_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_CFG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 6, 6);
 
index b2676e7..53f3a1f 100644 (file)
@@ -566,6 +566,7 @@ void rts5249_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_CFG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
 
@@ -729,6 +730,7 @@ static const struct pcr_ops rts524a_pcr_ops = {
 void rts524a_init_params(struct rtsx_pcr *pcr)
 {
        rts5249_init_params(pcr);
+       pcr->aspm_mode = ASPM_MODE_REG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
        pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
        pcr->option.ltr_l1off_snooze_sspwrgate =
@@ -845,6 +847,7 @@ static const struct pcr_ops rts525a_pcr_ops = {
 void rts525a_init_params(struct rtsx_pcr *pcr)
 {
        rts5249_init_params(pcr);
+       pcr->aspm_mode = ASPM_MODE_REG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(25, 29, 11);
        pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
        pcr->option.ltr_l1off_snooze_sspwrgate =
index 080a7d6..9b42b20 100644 (file)
@@ -628,6 +628,7 @@ void rts5260_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_REG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
 
index 6c64dad..1fd4e0e 100644 (file)
@@ -783,6 +783,7 @@ void rts5261_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = 0x00;
        pcr->sd30_drive_sel_3v3 = 0x00;
        pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_REG;
        pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 11);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
 
index 2733111..baf8359 100644 (file)
@@ -85,12 +85,18 @@ static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
        if (pcr->aspm_enabled == enable)
                return;
 
-       if (pcr->aspm_en & 0x02)
-               rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
-                       FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
-       else
-               rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
-                       FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+       if (pcr->aspm_mode == ASPM_MODE_CFG) {
+               pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+                                               PCI_EXP_LNKCTL_ASPMC,
+                                               enable ? pcr->aspm_en : 0);
+       } else if (pcr->aspm_mode == ASPM_MODE_REG) {
+               if (pcr->aspm_en & 0x02)
+                       rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
+                               FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+               else
+                       rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
+                               FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+       }
 
        if (!enable && (pcr->aspm_en & 0x02))
                mdelay(10);
@@ -1394,7 +1400,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
                        return err;
        }
 
-       rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
+       if (pcr->aspm_mode == ASPM_MODE_REG)
+               rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
 
        /* No CD interrupt if probing driver with card inserted.
         * So we need to initialize pcr->card_exist here.
@@ -1410,6 +1417,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
 static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
 {
        int err;
+       u16 cfg_val;
+       u8 val;
 
        spin_lock_init(&pcr->lock);
        mutex_init(&pcr->pcr_mutex);
@@ -1477,6 +1486,21 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
        if (!pcr->slots)
                return -ENOMEM;
 
+       if (pcr->aspm_mode == ASPM_MODE_CFG) {
+               pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &cfg_val);
+               if (cfg_val & PCI_EXP_LNKCTL_ASPM_L1)
+                       pcr->aspm_enabled = true;
+               else
+                       pcr->aspm_enabled = false;
+
+       } else if (pcr->aspm_mode == ASPM_MODE_REG) {
+               rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
+               if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
+                       pcr->aspm_enabled = false;
+               else
+                       pcr->aspm_enabled = true;
+       }
+
        if (pcr->ops->fetch_vendor_settings)
                pcr->ops->fetch_vendor_settings(pcr);
 
@@ -1506,7 +1530,6 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
        struct pcr_handle *handle;
        u32 base, len;
        int ret, i, bar = 0;
-       u8 val;
 
        dev_dbg(&(pcidev->dev),
                ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
@@ -1572,11 +1595,6 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
        pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
        pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
        pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
-       rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
-       if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
-               pcr->aspm_enabled = false;
-       else
-               pcr->aspm_enabled = true;
        pcr->card_inserted = 0;
        pcr->card_removed = 0;
        INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
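
Taken together, the rtsx hunks make the ASPM strategy explicit per chip: ASPM_MODE_CFG drives the standard PCIe Link Control register, ASPM_MODE_REG keeps the legacy vendor-register writes, and the initial aspm_enabled state is now probed per mode in rtsx_pci_init_chip() rather than unconditionally at probe time. The config-space path reduces to this pattern (sketch; aspm_en is the L0s/L1 enable mask, as in struct rtsx_pcr):

        #include <linux/pci.h>

        /* CFG mode: toggle the ASPM-control field of Link Control. */
        static void aspm_cfg_toggle(struct pci_dev *pdev, u16 aspm_en, bool enable)
        {
                pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
                                                   PCI_EXP_LNKCTL_ASPMC,
                                                   enable ? aspm_en : 0);
        }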
index 64d33e3..67c5b45 100644 (file)
                printk(KERN_INFO a);    \
 } while (0)
 #define v2printk(a...) do {            \
-       if (verbose > 1)                \
+       if (verbose > 1) {              \
                printk(KERN_INFO a);    \
+       }                               \
        touch_nmi_watchdog();           \
 } while (0)
 #define eprintk(a...) do {             \
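
The added braces in v2printk() change no behaviour — touch_nmi_watchdog() was already unconditional — but they make that explicit inside the macro body; this is the usual shape of a fix for a compiler misleading-indentation warning (plausibly gcc 11's -Wmisleading-indentation, though the diff alone does not say). The generic hazard such warnings catch:

        /* Indentation suggests both calls are guarded, but only the
         * first one is; the second always runs. */
        if (verbose > 1)
                printk(KERN_INFO "status...\n");
                touch_nmi_watchdog();   /* unconditional despite the indent */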
index a98f6b8..aab3ebf 100644 (file)
@@ -277,6 +277,9 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
                return ret;
        }
 
+       pm_runtime_mark_last_busy(dev->dev);
+       pm_request_autosuspend(dev->dev);
+
        list_move_tail(&cb->list, &cl->rd_pending);
 
        return 0;
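
The two added calls are the standard runtime-PM autosuspend idiom: record that the device was just used, then queue an asynchronous suspend that only fires once the autosuspend delay has elapsed. In generic form (not mei-specific):

        #include <linux/pm_runtime.h>

        static void done_with_device(struct device *dev)
        {
                pm_runtime_mark_last_busy(dev); /* restart the autosuspend timer */
                pm_request_autosuspend(dev);    /* async suspend after the delay */
        }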
index 635bf31..baab4c2 100644 (file)
@@ -692,14 +692,19 @@ static int renesas_sdhi_execute_tuning(struct mmc_host *mmc, u32 opcode)
 
        /* Issue CMD19 twice for each tap */
        for (i = 0; i < 2 * priv->tap_num; i++) {
+               int cmd_error;
+
                /* Set sampling clock position */
                sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num);
 
-               if (mmc_send_tuning(mmc, opcode, NULL) == 0)
+               if (mmc_send_tuning(mmc, opcode, &cmd_error) == 0)
                        set_bit(i, priv->taps);
 
                if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_SMPCMP) == 0)
                        set_bit(i, priv->smpcmp);
+
+               if (cmd_error)
+                       mmc_abort_tuning(mmc, opcode);
        }
 
        ret = renesas_sdhi_select_tuning(host);
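
Passing a real &cmd_error instead of NULL lets each iteration distinguish a bad tuning response from a failed tuning command; in the latter case mmc_abort_tuning() issues a stop command so the card is in a known state before the next tap is tried. The per-tap pattern in isolation (names as in the hunk above):

        int cmd_error = 0;

        if (mmc_send_tuning(mmc, opcode, &cmd_error) == 0)
                set_bit(i, priv->taps);         /* valid response at this tap */

        if (cmd_error)
                mmc_abort_tuning(mmc, opcode);  /* recover after a failed command */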
@@ -939,7 +944,7 @@ static const struct soc_device_attribute sdhi_quirks_match[]  = {
        { .soc_id = "r8a7795", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps2367 },
        { .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
        { .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 },
-       { .soc_id = "r8a7796", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps1357 },
+       { .soc_id = "r8a77961", .data = &sdhi_quirks_bad_taps1357 },
        { .soc_id = "r8a77965", .data = &sdhi_quirks_r8a77965 },
        { .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 },
        { .soc_id = "r8a77990", .data = &sdhi_quirks_r8a77990 },
index edfad93..a7ee0af 100644 (file)
@@ -133,6 +133,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
        skb->dev = bareudp->dev;
        oiph = skb_network_header(skb);
        skb_reset_network_header(skb);
+       skb_reset_mac_header(skb);
 
        if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
                err = IP_ECN_decapsulate(oiph, skb);
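
skb_reset_mac_header() pairs with the existing skb_reset_network_header(): after decapsulation the recorded offsets still describe the stripped outer headers, and a stale mac_header can mislead anything that later consults it. The general decap idiom (outer_hdr_len is illustrative, not a field from this driver):

        /* Drop the outer headers, then re-anchor the offsets so they
         * describe the inner packet. */
        skb_pull(skb, outer_hdr_len);           /* outer_hdr_len: illustrative */
        skb_reset_network_header(skb);
        skb_reset_mac_header(skb);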
index d174823..4ffbfd5 100644 (file)
@@ -350,6 +350,7 @@ static int ldisc_open(struct tty_struct *tty)
        rtnl_lock();
        result = register_netdevice(dev);
        if (result) {
+               tty_kref_put(tty);
                rtnl_unlock();
                free_netdev(dev);
                return -ENODEV;
index 029e77d..a45865b 100644 (file)
@@ -82,6 +82,8 @@ struct mcba_priv {
        bool can_ka_first_pass;
        bool can_speed_check;
        atomic_t free_ctx_cnt;
+       void *rxbuf[MCBA_MAX_RX_URBS];
+       dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS];
 };
 
 /* CAN frame */
@@ -633,6 +635,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
        for (i = 0; i < MCBA_MAX_RX_URBS; i++) {
                struct urb *urb = NULL;
                u8 *buf;
+               dma_addr_t buf_dma;
 
                /* create a URB, and a buffer for it */
                urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -642,7 +645,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
                }
 
                buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
-                                        GFP_KERNEL, &urb->transfer_dma);
+                                        GFP_KERNEL, &buf_dma);
                if (!buf) {
                        netdev_err(netdev, "No memory left for USB buffer\n");
                        usb_free_urb(urb);
@@ -661,11 +664,14 @@ static int mcba_usb_start(struct mcba_priv *priv)
                if (err) {
                        usb_unanchor_urb(urb);
                        usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
-                                         buf, urb->transfer_dma);
+                                         buf, buf_dma);
                        usb_free_urb(urb);
                        break;
                }
 
+               priv->rxbuf[i] = buf;
+               priv->rxbuf_dma[i] = buf_dma;
+
                /* Drop reference, USB core will take care of freeing it */
                usb_free_urb(urb);
        }
@@ -708,7 +714,14 @@ static int mcba_usb_open(struct net_device *netdev)
 
 static void mcba_urb_unlink(struct mcba_priv *priv)
 {
+       int i;
+
        usb_kill_anchored_urbs(&priv->rx_submitted);
+
+       for (i = 0; i < MCBA_MAX_RX_URBS; ++i)
+               usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
+                                 priv->rxbuf[i], priv->rxbuf_dma[i]);
+
        usb_kill_anchored_urbs(&priv->tx_submitted);
 }
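
This plugs a leak: the coherent RX buffers were stored only in urb->transfer_dma, and nothing ever returned them to the DMA pool. Recording each buffer together with its DMA handle lets teardown free them explicitly — the standard usb_alloc_coherent() lifecycle (names from the patch):

        /* Allocation: keep both the CPU pointer and the DMA address. */
        buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
                                 GFP_KERNEL, &buf_dma);
        priv->rxbuf[i] = buf;
        priv->rxbuf_dma[i] = buf_dma;

        /* Teardown: release every buffer that was recorded. */
        usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
                          priv->rxbuf[i], priv->rxbuf_dma[i]);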
 
index 6e19945..b23e348 100644 (file)
@@ -728,6 +728,13 @@ static u16 b53_default_pvid(struct b53_device *dev)
                return 0;
 }
 
+static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
+{
+       struct b53_device *dev = ds->priv;
+
+       return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port);
+}
+
 int b53_configure_vlan(struct dsa_switch *ds)
 {
        struct b53_device *dev = ds->priv;
@@ -748,9 +755,20 @@ int b53_configure_vlan(struct dsa_switch *ds)
 
        b53_enable_vlan(dev, -1, dev->vlan_enabled, ds->vlan_filtering);
 
-       b53_for_each_port(dev, i)
+       /* Create an untagged VLAN entry for the default PVID in case
+        * CONFIG_VLAN_8021Q is disabled and there are no calls to
+        * dsa_slave_vlan_rx_add_vid() to create the default VLAN
+        * entry. Do this only when the tagging protocol is not
+        * DSA_TAG_PROTO_NONE
+        */
+       b53_for_each_port(dev, i) {
+               v = &dev->vlans[def_vid];
+               v->members |= BIT(i);
+               if (!b53_vlan_port_needs_forced_tagged(ds, i))
+                       v->untag = v->members;
                b53_write16(dev, B53_VLAN_PAGE,
                            B53_VLAN_PORT_DEF_TAG(i), def_vid);
+       }
 
        /* Upon initial call we have not set-up any VLANs, but upon
         * system resume, we need to restore all VLAN entries.
@@ -1460,13 +1478,6 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port,
        return 0;
 }
 
-static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
-{
-       struct b53_device *dev = ds->priv;
-
-       return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port);
-}
-
 int b53_vlan_add(struct dsa_switch *ds, int port,
                 const struct switchdev_obj_port_vlan *vlan,
                 struct netlink_ext_ack *extack)
index 8383cd6..b29d41e 100644 (file)
@@ -7,8 +7,8 @@ tristate "NXP SJA1105 Ethernet switch family support"
        select PACKING
        select CRC32
        help
-         This is the driver for the NXP SJA1105 automotive Ethernet switch
-         family. These are 5-port devices and are managed over an SPI
+         This is the driver for the NXP SJA1105 (5-port) and SJA1110 (10-port)
+         automotive Ethernet switch family. These are managed over an SPI
          interface. Probing is handled based on OF bindings and so is the
          linkage to PHYLINK. The driver supports the following revisions:
            - SJA1105E (Gen. 1, No TT-Ethernet)
@@ -17,6 +17,10 @@ tristate "NXP SJA1105 Ethernet switch family support"
            - SJA1105Q (Gen. 2, No SGMII, TT-Ethernet)
            - SJA1105R (Gen. 2, SGMII, No TT-Ethernet)
            - SJA1105S (Gen. 2, SGMII, TT-Ethernet)
+           - SJA1110A (Gen. 3, SGMII, TT-Ethernet, 100base-TX PHY, 10 ports)
+           - SJA1110B (Gen. 3, SGMII, TT-Ethernet, 100base-TX PHY, 9 ports)
+           - SJA1110C (Gen. 3, SGMII, TT-Ethernet, 100base-TX PHY, 7 ports)
+           - SJA1110D (Gen. 3, SGMII, TT-Ethernet, no 100base-TX PHY, 7 ports)
 
 config NET_DSA_SJA1105_PTP
        bool "Support for the PTP clock on the NXP SJA1105 Ethernet switch"
index 3912472..221c7ab 100644 (file)
@@ -136,6 +136,7 @@ struct sja1105_info {
        int (*clocking_setup)(struct sja1105_private *priv);
        int (*pcs_mdio_read)(struct mii_bus *bus, int phy, int reg);
        int (*pcs_mdio_write)(struct mii_bus *bus, int phy, int reg, u16 val);
+       int (*disable_microcontroller)(struct sja1105_private *priv);
        const char *name;
        bool supports_mii[SJA1105_MAX_NUM_PORTS];
        bool supports_rmii[SJA1105_MAX_NUM_PORTS];
@@ -363,7 +364,7 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port);
 int sja1110_setup_rgmii_delay(const void *ctx, int port);
 int sja1105_clocking_setup_port(struct sja1105_private *priv, int port);
 int sja1105_clocking_setup(struct sja1105_private *priv);
-int sja1110_clocking_setup(struct sja1105_private *priv);
+int sja1110_disable_microcontroller(struct sja1105_private *priv);
 
 /* From sja1105_ethtool.c */
 void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data);
index 645edea..387a1f2 100644 (file)
@@ -6,6 +6,7 @@
 #include "sja1105.h"
 
 #define SJA1105_SIZE_CGU_CMD   4
+#define SJA1110_BASE_MCSS_CLK  SJA1110_CGU_ADDR(0x70)
 #define SJA1110_BASE_TIMER_CLK SJA1110_CGU_ADDR(0x74)
 
 /* Common structure for CFG_PAD_MIIx_RX and CFG_PAD_MIIx_TX */
@@ -832,17 +833,30 @@ sja1110_cgu_outclk_packing(void *buf, struct sja1110_cgu_outclk *outclk,
        sja1105_packing(buf, &outclk->pd,         0,  0, size, op);
 }
 
-/* Power down the BASE_TIMER_CLK in order to disable the watchdog */
-int sja1110_clocking_setup(struct sja1105_private *priv)
+int sja1110_disable_microcontroller(struct sja1105_private *priv)
 {
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+       struct sja1110_cgu_outclk outclk_6_c = {
+               .clksrc = 0x3,
+               .pd = true,
+       };
        struct sja1110_cgu_outclk outclk_7_c = {
                .clksrc = 0x5,
                .pd = true,
        };
+       int rc;
 
+       /* Power down the BASE_TIMER_CLK to disable the watchdog timer */
        sja1110_cgu_outclk_packing(packed_buf, &outclk_7_c, PACK);
 
-       return sja1105_xfer_buf(priv, SPI_WRITE, SJA1110_BASE_TIMER_CLK,
+       rc = sja1105_xfer_buf(priv, SPI_WRITE, SJA1110_BASE_TIMER_CLK,
+                             packed_buf, SJA1105_SIZE_CGU_CMD);
+       if (rc)
+               return rc;
+
+       /* Power down the BASE_MCSS_CLOCK to gate the microcontroller off */
+       sja1110_cgu_outclk_packing(packed_buf, &outclk_6_c, PACK);
+
+       return sja1105_xfer_buf(priv, SPI_WRITE, SJA1110_BASE_MCSS_CLK,
                                packed_buf, SJA1105_SIZE_CGU_CMD);
 }
index 8e5cdf9..a9777eb 100644 (file)
@@ -1886,17 +1886,23 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
        mutex_lock(&priv->ptp_data.lock);
 
        rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
-       if (rc < 0)
-               goto out_unlock_ptp;
+       if (rc < 0) {
+               mutex_unlock(&priv->ptp_data.lock);
+               goto out;
+       }
 
        /* Reset switch and send updated static configuration */
        rc = sja1105_static_config_upload(priv);
-       if (rc < 0)
-               goto out_unlock_ptp;
+       if (rc < 0) {
+               mutex_unlock(&priv->ptp_data.lock);
+               goto out;
+       }
 
        rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
-       if (rc < 0)
-               goto out_unlock_ptp;
+       if (rc < 0) {
+               mutex_unlock(&priv->ptp_data.lock);
+               goto out;
+       }
 
        t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
        t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
@@ -1911,7 +1917,6 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
 
        __sja1105_ptp_adjtime(ds, now);
 
-out_unlock_ptp:
        mutex_unlock(&priv->ptp_data.lock);
 
        dev_info(priv->ds->dev,
@@ -1922,9 +1927,11 @@ out_unlock_ptp:
         * For these interfaces there is no dynamic configuration
         * needed, since PLLs have same settings at all speeds.
         */
-       rc = priv->info->clocking_setup(priv);
-       if (rc < 0)
-               goto out;
+       if (priv->info->clocking_setup) {
+               rc = priv->info->clocking_setup(priv);
+               if (rc < 0)
+                       goto out;
+       }
 
        for (i = 0; i < ds->num_ports; i++) {
                struct dw_xpcs *xpcs = priv->xpcs[i];
@@ -3032,18 +3039,34 @@ static int sja1105_setup(struct dsa_switch *ds)
                goto out_ptp_clock_unregister;
        }
 
+       if (priv->info->disable_microcontroller) {
+               rc = priv->info->disable_microcontroller(priv);
+               if (rc < 0) {
+                       dev_err(ds->dev,
+                               "Failed to disable microcontroller: %pe\n",
+                               ERR_PTR(rc));
+                       goto out_mdiobus_unregister;
+               }
+       }
+
        /* Create and send configuration down to device */
        rc = sja1105_static_config_load(priv);
        if (rc < 0) {
                dev_err(ds->dev, "Failed to load static config: %d\n", rc);
                goto out_mdiobus_unregister;
        }
+
        /* Configure the CGU (PHY link modes and speeds) */
-       rc = priv->info->clocking_setup(priv);
-       if (rc < 0) {
-               dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
-               goto out_static_config_free;
+       if (priv->info->clocking_setup) {
+               rc = priv->info->clocking_setup(priv);
+               if (rc < 0) {
+                       dev_err(ds->dev,
+                               "Failed to configure MII clocking: %pe\n",
+                               ERR_PTR(rc));
+                       goto out_static_config_free;
+               }
        }
+
        /* On SJA1105, VLAN filtering per se is always enabled in hardware.
         * The only thing we can do to disable it is lie about what the 802.1Q
         * EtherType is.
index 4aed16d..08cc5db 100644 (file)
@@ -199,7 +199,11 @@ static int sja1110_reset_cmd(struct dsa_switch *ds)
        const struct sja1105_regs *regs = priv->info->regs;
        u32 switch_reset = BIT(20);
 
-       /* Switch core reset */
+       /* Only reset the switch core.
+        * A full cold reset would re-enable the BASE_MCSS_CLOCK PLL which
+        * would turn on the microcontroller, potentially letting it execute
+        * code which could interfere with our configuration.
+        */
        return sja1105_xfer_u32(priv, SPI_WRITE, regs->rgu, &switch_reset, NULL);
 }
 
@@ -796,7 +800,7 @@ const struct sja1105_info sja1110a_info = {
        .ptp_cmd_packing        = sja1105pqrs_ptp_cmd_packing,
        .rxtstamp               = sja1110_rxtstamp,
        .txtstamp               = sja1110_txtstamp,
-       .clocking_setup         = sja1110_clocking_setup,
+       .disable_microcontroller = sja1110_disable_microcontroller,
        .pcs_mdio_read          = sja1110_pcs_mdio_read,
        .pcs_mdio_write         = sja1110_pcs_mdio_write,
        .port_speed             = {
@@ -847,7 +851,7 @@ const struct sja1105_info sja1110b_info = {
        .ptp_cmd_packing        = sja1105pqrs_ptp_cmd_packing,
        .rxtstamp               = sja1110_rxtstamp,
        .txtstamp               = sja1110_txtstamp,
-       .clocking_setup         = sja1110_clocking_setup,
+       .disable_microcontroller = sja1110_disable_microcontroller,
        .pcs_mdio_read          = sja1110_pcs_mdio_read,
        .pcs_mdio_write         = sja1110_pcs_mdio_write,
        .port_speed             = {
@@ -898,7 +902,7 @@ const struct sja1105_info sja1110c_info = {
        .ptp_cmd_packing        = sja1105pqrs_ptp_cmd_packing,
        .rxtstamp               = sja1110_rxtstamp,
        .txtstamp               = sja1110_txtstamp,
-       .clocking_setup         = sja1110_clocking_setup,
+       .disable_microcontroller = sja1110_disable_microcontroller,
        .pcs_mdio_read          = sja1110_pcs_mdio_read,
        .pcs_mdio_write         = sja1110_pcs_mdio_write,
        .port_speed             = {
@@ -949,7 +953,7 @@ const struct sja1105_info sja1110d_info = {
        .ptp_cmd_packing        = sja1105pqrs_ptp_cmd_packing,
        .rxtstamp               = sja1110_rxtstamp,
        .txtstamp               = sja1110_txtstamp,
-       .clocking_setup         = sja1110_clocking_setup,
+       .disable_microcontroller = sja1110_disable_microcontroller,
        .pcs_mdio_read          = sja1110_pcs_mdio_read,
        .pcs_mdio_write         = sja1110_pcs_mdio_write,
        .port_speed             = {
index 1491b72..7a422ef 100644 (file)
@@ -1052,8 +1052,7 @@ sja1105_static_config_check_valid(const struct sja1105_static_config *config,
        (tables[blk_idx].entry_count == tables[blk_idx].ops->max_entry_count)
 
        if (tables[BLK_IDX_SCHEDULE].entry_count) {
-               if (config->device_id != SJA1105T_DEVICE_ID &&
-                   config->device_id != SJA1105QS_DEVICE_ID)
+               if (!tables[BLK_IDX_SCHEDULE].ops->max_entry_count)
                        return SJA1105_TTETHERNET_NOT_SUPPORTED;
 
                if (tables[BLK_IDX_SCHEDULE_ENTRY_POINTS].entry_count == 0)
index 44ef6b8..0e43000 100644 (file)
@@ -239,36 +239,48 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
 static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
                                struct ena_tx_buffer *tx_info,
                                struct xdp_frame *xdpf,
-                               void **push_hdr,
-                               u32 *push_len)
+                               struct ena_com_tx_ctx *ena_tx_ctx)
 {
        struct ena_adapter *adapter = xdp_ring->adapter;
        struct ena_com_buf *ena_buf;
-       dma_addr_t dma = 0;
+       int push_len = 0;
+       dma_addr_t dma;
+       void *data;
        u32 size;
 
        tx_info->xdpf = xdpf;
+       data = tx_info->xdpf->data;
        size = tx_info->xdpf->len;
-       ena_buf = tx_info->bufs;
 
-       /* llq push buffer */
-       *push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
-       *push_hdr = tx_info->xdpf->data;
+       if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+               /* Designate part of the packet for LLQ */
+               push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
+
+               ena_tx_ctx->push_header = data;
+
+               size -= push_len;
+               data += push_len;
+       }
+
+       ena_tx_ctx->header_len = push_len;
 
-       if (size - *push_len > 0) {
+       if (size > 0) {
                dma = dma_map_single(xdp_ring->dev,
-                                    *push_hdr + *push_len,
-                                    size - *push_len,
+                                    data,
+                                    size,
                                     DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
                        goto error_report_dma_error;
 
-               tx_info->map_linear_data = 1;
-               tx_info->num_of_bufs = 1;
-       }
+               tx_info->map_linear_data = 0;
 
-       ena_buf->paddr = dma;
-       ena_buf->len = size;
+               ena_buf = tx_info->bufs;
+               ena_buf->paddr = dma;
+               ena_buf->len = size;
+
+               ena_tx_ctx->ena_bufs = ena_buf;
+               ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
+       }
 
        return 0;
 
@@ -277,10 +289,6 @@ error_report_dma_error:
                          &xdp_ring->syncp);
        netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
 
-       xdp_return_frame_rx_napi(tx_info->xdpf);
-       tx_info->xdpf = NULL;
-       tx_info->num_of_bufs = 0;
-
        return -EINVAL;
 }
 
@@ -292,8 +300,6 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
        struct ena_com_tx_ctx ena_tx_ctx = {};
        struct ena_tx_buffer *tx_info;
        u16 next_to_use, req_id;
-       void *push_hdr;
-       u32 push_len;
        int rc;
 
        next_to_use = xdp_ring->next_to_use;
@@ -301,15 +307,11 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
        tx_info = &xdp_ring->tx_buffer_info[req_id];
        tx_info->num_of_bufs = 0;
 
-       rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
+       rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
        if (unlikely(rc))
                return rc;
 
-       ena_tx_ctx.ena_bufs = tx_info->bufs;
-       ena_tx_ctx.push_header = push_hdr;
-       ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
        ena_tx_ctx.req_id = req_id;
-       ena_tx_ctx.header_len = push_len;
 
        rc = ena_xmit_common(dev,
                             xdp_ring,
index 45e380f..11ef1fb 100644 (file)
@@ -1876,6 +1876,7 @@ out_free_netdev:
        free_netdev(netdev);
 out_pci_release:
        pci_release_mem_regions(pdev);
+       pci_disable_pcie_error_reporting(pdev);
 out_pci_disable:
        pci_disable_device(pdev);
        return err;
index cb88ffb..1a02ca6 100644 (file)
@@ -206,6 +206,7 @@ config SYSTEMPORT
 config BNXT
        tristate "Broadcom NetXtreme-C/E support"
        depends on PCI
+       imply PTP_1588_CLOCK
        select FW_LOADER
        select LIBCRC32C
        select NET_DEVLINK
index cb97ec5..2b8ae68 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_BNXT) += bnxt_en.o
 
-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o
 bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
 bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o
index fcc729d..8f185a4 100644 (file)
@@ -49,6 +49,8 @@
 #include <linux/log2.h>
 #include <linux/aer.h>
 #include <linux/bitmap.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
 #include <linux/cpu_rmap.h>
 #include <linux/cpumask.h>
 #include <net/pkt_cls.h>
@@ -63,6 +65,7 @@
 #include "bnxt_ethtool.h"
 #include "bnxt_dcb.h"
 #include "bnxt_xdp.h"
+#include "bnxt_ptp.h"
 #include "bnxt_vfr.h"
 #include "bnxt_tc.h"
 #include "bnxt_devlink.h"
@@ -418,12 +421,25 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
        }
 
-       if (unlikely(skb->no_fcs)) {
-               lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
-               goto normal_tx;
+       if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+               struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+               if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
+                   atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
+                       if (!bnxt_ptp_parse(skb, &ptp->tx_seqid)) {
+                               lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
+                               skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+                       } else {
+                               atomic_inc(&bp->ptp_cfg->tx_avail);
+                       }
+               }
        }
 
-       if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
+       if (unlikely(skb->no_fcs))
+               lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
+
+       if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
+           !lflags) {
                struct tx_push_buffer *tx_push_buf = txr->tx_push;
                struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
                struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
@@ -590,6 +606,8 @@ normal_tx:
 
        netdev_tx_sent_queue(txq, skb->len);
 
+       skb_tx_timestamp(skb);
+
        /* Sync BD data before updating doorbell */
        wmb();
 
@@ -619,6 +637,9 @@ tx_done:
        return NETDEV_TX_OK;
 
 tx_dma_error:
+       if (BNXT_TX_PTP_IS_SET(lflags))
+               atomic_inc(&bp->ptp_cfg->tx_avail);
+
        last_frag = i;
 
        /* start back at beginning and unmap skb */
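
Here tx_avail acts as a budget of in-flight PTP TX timestamps: atomic_dec_if_positive() reserves a slot only when one is free, and every failure path — the parse failure earlier and the DMA error above — must hand the slot back. The reserve/release pair in isolation:

        /* atomic_dec_if_positive() returns the old value minus one and
         * only stores the decrement when the result is non-negative, so
         * >= 0 means a slot was successfully reserved. */
        if (atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
                /* ... arm hardware timestamping for this skb ... */
        }

        /* Any later failure must return the slot: */
        atomic_inc(&ptp->tx_avail);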
@@ -653,6 +674,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 
        for (i = 0; i < nr_pkts; i++) {
                struct bnxt_sw_tx_bd *tx_buf;
+               bool compl_deferred = false;
                struct sk_buff *skb;
                int j, last;
 
@@ -679,12 +701,21 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
                                skb_frag_size(&skb_shinfo(skb)->frags[j]),
                                PCI_DMA_TODEVICE);
                }
+               if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+                       if (bp->flags & BNXT_FLAG_CHIP_P5) {
+                               if (!bnxt_get_tx_ts_p5(bp, skb))
+                                       compl_deferred = true;
+                               else
+                                       atomic_inc(&bp->ptp_cfg->tx_avail);
+                       }
+               }
 
 next_tx_int:
                cons = NEXT_TX(cons);
 
                tx_bytes += skb->len;
-               dev_kfree_skb_any(skb);
+               if (!compl_deferred)
+                       dev_kfree_skb_any(skb);
        }
 
        netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
@@ -1706,9 +1737,9 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        u8 *data_ptr, agg_bufs, cmp_type;
        dma_addr_t dma_addr;
        struct sk_buff *skb;
+       u32 flags, misc;
        void *data;
        int rc = 0;
-       u32 misc;
 
        rxcmp = (struct rx_cmp *)
                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
@@ -1806,7 +1837,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                goto next_rx_no_len;
        }
 
-       len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
+       flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
+       len = flags >> RX_CMP_LEN_SHIFT;
        dma_addr = rx_buf->mapping;
 
        if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
@@ -1883,6 +1915,24 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                }
        }
 
+       if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
+                    RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
+               if (bp->flags & BNXT_FLAG_CHIP_P5) {
+                       u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
+                       u64 ns, ts;
+
+                       if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
+                               struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+                               spin_lock_bh(&ptp->ptp_lock);
+                               ns = timecounter_cyc2time(&ptp->tc, ts);
+                               spin_unlock_bh(&ptp->ptp_lock);
+                               memset(skb_hwtstamps(skb), 0,
+                                      sizeof(*skb_hwtstamps(skb)));
+                               skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+                       }
+               }
+       }
        bnxt_deliver_skb(bp, bnapi, skb);
        rc = 1;
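
On the RX side, the raw completion timestamp is run through the kernel timecounter before being attached to the skb; the conversion happens under ptp_lock because the timecounter state is shared with the clock-adjustment paths. The core idiom (fields as in the patch):

        u64 ns;

        spin_lock_bh(&ptp->ptp_lock);
        ns = timecounter_cyc2time(&ptp->tc, ts);        /* cycles -> ns */
        spin_unlock_bh(&ptp->ptp_lock);
        skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);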
 
@@ -7308,7 +7358,7 @@ skip_rdma:
        entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
                     2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
        entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
-       entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
+       entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
        entries = roundup(entries, ctx->tqm_entries_multiple);
        entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
        for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
@@ -7391,6 +7441,56 @@ hwrm_func_resc_qcaps_exit:
        return rc;
 }
 
+/* bp->hwrm_cmd_lock already held. */
+static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
+{
+       struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_port_mac_ptp_qcfg_input req = {0};
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+       u8 flags;
+       int rc;
+
+       if (bp->hwrm_spec_code < 0x10801) {
+               rc = -ENODEV;
+               goto no_ptp;
+       }
+
+       req.port_id = cpu_to_le16(bp->pf.port_id);
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_PTP_QCFG, -1, -1);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               goto no_ptp;
+
+       flags = resp->flags;
+       if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
+               rc = -ENODEV;
+               goto no_ptp;
+       }
+       if (!ptp) {
+               ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
+               if (!ptp)
+                       return -ENOMEM;
+               ptp->bp = bp;
+               bp->ptp_cfg = ptp;
+       }
+       if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
+               ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
+               ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
+       } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
+               ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
+               ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
+       } else {
+               rc = -ENODEV;
+               goto no_ptp;
+       }
+       return 0;
+
+no_ptp:
+       kfree(ptp);
+       bp->ptp_cfg = NULL;
+       return rc;
+}
+
 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 {
        int rc = 0;
@@ -7462,6 +7562,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
                bp->flags &= ~BNXT_FLAG_WOL_CAP;
                if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
                        bp->flags |= BNXT_FLAG_WOL_CAP;
+               if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED)
+                       __bnxt_hwrm_ptp_qcfg(bp);
        } else {
 #ifdef CONFIG_BNXT_SRIOV
                struct bnxt_vf_info *vf = &bp->vf;
@@ -10020,6 +10122,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
                }
        }
 
+       bnxt_ptp_start(bp);
        rc = bnxt_init_nic(bp, irq_re_init);
        if (rc) {
                netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
@@ -10335,6 +10438,12 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
                                                mdio->val_in);
 
+       case SIOCSHWTSTAMP:
+               return bnxt_hwtstamp_set(dev, ifr);
+
+       case SIOCGHWTSTAMP:
+               return bnxt_hwtstamp_get(dev, ifr);
+
        default:
                /* do nothing */
                break;
@@ -11750,6 +11859,8 @@ static void bnxt_fw_init_one_p3(struct bnxt *bp)
        bnxt_hwrm_coal_params_qcaps(bp);
 }
 
+static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
+
 static int bnxt_fw_init_one(struct bnxt *bp)
 {
        int rc;
@@ -11764,6 +11875,9 @@ static int bnxt_fw_init_one(struct bnxt *bp)
                netdev_err(bp->dev, "Firmware init phase 2 failed\n");
                return rc;
        }
+       rc = bnxt_probe_phy(bp, false);
+       if (rc)
+               return rc;
        rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
        if (rc)
                return rc;
@@ -12546,6 +12660,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
 
        if (BNXT_PF(bp))
                devlink_port_type_clear(&bp->dl_port);
+
+       bnxt_ptp_clear(bp);
        pci_disable_pcie_error_reporting(pdev);
        unregister_netdev(dev);
        clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
@@ -12566,6 +12682,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
        bnxt_dcb_free(bp);
        kfree(bp->edev);
        bp->edev = NULL;
+       kfree(bp->ptp_cfg);
+       bp->ptp_cfg = NULL;
        kfree(bp->fw_health);
        bp->fw_health = NULL;
        bnxt_cleanup_pci(bp);
@@ -13127,6 +13245,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                                   rc);
        }
 
+       if (bnxt_ptp_init(bp)) {
+               netdev_warn(dev, "PTP initialization failed.\n");
+               kfree(bp->ptp_cfg);
+               bp->ptp_cfg = NULL;
+       }
        bnxt_inv_fw_health_reg(bp);
        bnxt_dl_register(bp);
 
@@ -13155,6 +13278,9 @@ init_err_pci_clean:
        bnxt_hwrm_func_drv_unrgtr(bp);
        bnxt_free_hwrm_short_cmd_req(bp);
        bnxt_free_hwrm_resources(bp);
+       bnxt_ethtool_free(bp);
+       kfree(bp->ptp_cfg);
+       bp->ptp_cfg = NULL;
        kfree(bp->fw_health);
        bp->fw_health = NULL;
        bnxt_cleanup_pci(bp);
index 30e47ea..bcf8d00 100644 (file)
@@ -89,6 +89,8 @@ struct tx_bd_ext {
        #define TX_BD_CFA_META_KEY_VLAN                         (1 << 28)
 };
 
+#define BNXT_TX_PTP_IS_SET(lflags) ((lflags) & cpu_to_le32(TX_BD_FLAGS_STAMP))
+
 struct rx_bd {
        __le32 rx_bd_len_flags_type;
        #define RX_BD_TYPE                                      (0x3f << 0)
@@ -159,6 +161,7 @@ struct rx_cmp {
        #define RX_CMP_FLAGS_RSS_VALID                          (1 << 10)
        #define RX_CMP_FLAGS_UNUSED                             (1 << 11)
         #define RX_CMP_FLAGS_ITYPES_SHIFT                       12
+        #define RX_CMP_FLAGS_ITYPES_MASK                        0xf000
         #define RX_CMP_FLAGS_ITYPE_UNKNOWN                      (0 << 12)
         #define RX_CMP_FLAGS_ITYPE_IP                           (1 << 12)
         #define RX_CMP_FLAGS_ITYPE_TCP                          (2 << 12)
@@ -240,7 +243,7 @@ struct rx_cmp_ext {
        #define RX_CMPL_CFA_CODE_MASK                           (0xffff << 16)
         #define RX_CMPL_CFA_CODE_SFT                            16
 
-       __le32 rx_cmp_unused3;
+       __le32 rx_cmp_timestamp;
 };
 
 #define RX_CMP_L2_ERRORS                                               \
@@ -1362,6 +1365,9 @@ struct bnxt_test_info {
 #define BNXT_GRC_REG_CHIP_NUM                  0x48
 #define BNXT_GRC_REG_BASE                      0x260000
 
+#define BNXT_TS_REG_TIMESYNC_TS0_LOWER         0x640180c
+#define BNXT_TS_REG_TIMESYNC_TS0_UPPER         0x6401810
+
 #define BNXT_GRC_BASE_MASK                     0xfffff000
 #define BNXT_GRC_OFFSET_MASK                   0x00000ffc
 
@@ -2042,6 +2048,8 @@ struct bnxt {
 
        struct bpf_prog         *xdp_prog;
 
+       struct bnxt_ptp_cfg     *ptp_cfg;
+
        /* devlink interface and vf-rep structs */
        struct devlink          *dl;
        struct devlink_port     dl_port;
index c664ec5..786ca51 100644 (file)
 #include <linux/firmware.h>
 #include <linux/utsname.h>
 #include <linux/time.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/timecounter.h>
 #include "bnxt_hsi.h"
 #include "bnxt.h"
 #include "bnxt_xdp.h"
+#include "bnxt_ptp.h"
 #include "bnxt_ethtool.h"
 #include "bnxt_nvm_defs.h"     /* NVRAM content constant and structure defs */
 #include "bnxt_fw_hdr.h"       /* Firmware hdr constant and structure defs */
@@ -3926,6 +3930,35 @@ static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
        return 0;
 }
 
+static int bnxt_get_ts_info(struct net_device *dev,
+                           struct ethtool_ts_info *info)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_ptp_cfg *ptp;
+
+       ptp = bp->ptp_cfg;
+       info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+                               SOF_TIMESTAMPING_RX_SOFTWARE |
+                               SOF_TIMESTAMPING_SOFTWARE;
+
+       info->phc_index = -1;
+       if (!ptp)
+               return 0;
+
+       info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
+                                SOF_TIMESTAMPING_RX_HARDWARE |
+                                SOF_TIMESTAMPING_RAW_HARDWARE;
+       if (ptp->ptp_clock)
+               info->phc_index = ptp_clock_index(ptp->ptp_clock);
+
+       info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+       info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+                          (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+                          (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+       return 0;
+}
+
 void bnxt_ethtool_init(struct bnxt *bp)
 {
        struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
@@ -4172,6 +4205,7 @@ const struct ethtool_ops bnxt_ethtool_ops = {
        .nway_reset             = bnxt_nway_reset,
        .set_phys_id            = bnxt_set_phys_id,
        .self_test              = bnxt_self_test,
+       .get_ts_info            = bnxt_get_ts_info,
        .reset                  = bnxt_reset,
        .set_dump               = bnxt_set_dump,
        .get_dump_flag          = bnxt_get_dump_flag,
index 6199f12..3fc6781 100644 (file)
@@ -189,6 +189,8 @@ struct cmd_nums {
        #define HWRM_QUEUE_VLANPRI_QCAPS                  0x83UL
        #define HWRM_QUEUE_VLANPRI2PRI_QCFG               0x84UL
        #define HWRM_QUEUE_VLANPRI2PRI_CFG                0x85UL
+       #define HWRM_QUEUE_GLOBAL_CFG                     0x86UL
+       #define HWRM_QUEUE_GLOBAL_QCFG                    0x87UL
        #define HWRM_CFA_L2_FILTER_ALLOC                  0x90UL
        #define HWRM_CFA_L2_FILTER_FREE                   0x91UL
        #define HWRM_CFA_L2_FILTER_CFG                    0x92UL
@@ -250,6 +252,8 @@ struct cmd_nums {
        #define HWRM_PORT_SFP_SIDEBAND_QCFG               0xd7UL
        #define HWRM_FW_STATE_UNQUIESCE                   0xd8UL
        #define HWRM_PORT_DSC_DUMP                        0xd9UL
+       #define HWRM_PORT_EP_TX_QCFG                      0xdaUL
+       #define HWRM_PORT_EP_TX_CFG                       0xdbUL
        #define HWRM_TEMP_MONITOR_QUERY                   0xe0UL
        #define HWRM_REG_POWER_QUERY                      0xe1UL
        #define HWRM_CORE_FREQUENCY_QUERY                 0xe2UL
@@ -305,6 +309,8 @@ struct cmd_nums {
        #define HWRM_CFA_EEM_OP                           0x123UL
        #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS              0x124UL
        #define HWRM_CFA_TFLIB                            0x125UL
+       #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR            0x126UL
+       #define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR          0x127UL
        #define HWRM_ENGINE_CKV_STATUS                    0x12eUL
        #define HWRM_ENGINE_CKV_CKEK_ADD                  0x12fUL
        #define HWRM_ENGINE_CKV_CKEK_DELETE               0x130UL
@@ -356,6 +362,12 @@ struct cmd_nums {
        #define HWRM_STAT_EXT_CTX_QUERY                   0x199UL
        #define HWRM_FUNC_SPD_CFG                         0x19aUL
        #define HWRM_FUNC_SPD_QCFG                        0x19bUL
+       #define HWRM_FUNC_PTP_PIN_QCFG                    0x19cUL
+       #define HWRM_FUNC_PTP_PIN_CFG                     0x19dUL
+       #define HWRM_FUNC_PTP_CFG                         0x19eUL
+       #define HWRM_FUNC_PTP_TS_QUERY                    0x19fUL
+       #define HWRM_FUNC_PTP_EXT_CFG                     0x1a0UL
+       #define HWRM_FUNC_PTP_EXT_QCFG                    0x1a1UL
        #define HWRM_SELFTEST_QLIST                       0x200UL
        #define HWRM_SELFTEST_EXEC                        0x201UL
        #define HWRM_SELFTEST_IRQ                         0x202UL
@@ -373,6 +385,10 @@ struct cmd_nums {
        #define HWRM_MFG_PARAM_SEEPROM_SYNC               0x20eUL
        #define HWRM_MFG_PARAM_SEEPROM_READ               0x20fUL
        #define HWRM_MFG_PARAM_SEEPROM_HEALTH             0x210UL
+       #define HWRM_MFG_PRVSN_EXPORT_CSR                 0x211UL
+       #define HWRM_MFG_PRVSN_IMPORT_CERT                0x212UL
+       #define HWRM_MFG_PRVSN_GET_STATE                  0x213UL
+       #define HWRM_MFG_GET_NVM_MEASUREMENT              0x214UL
        #define HWRM_TF                                   0x2bcUL
        #define HWRM_TF_VERSION_GET                       0x2bdUL
        #define HWRM_TF_SESSION_OPEN                      0x2c6UL
@@ -385,6 +401,7 @@ struct cmd_nums {
        #define HWRM_TF_SESSION_RESC_ALLOC                0x2cdUL
        #define HWRM_TF_SESSION_RESC_FREE                 0x2ceUL
        #define HWRM_TF_SESSION_RESC_FLUSH                0x2cfUL
+       #define HWRM_TF_SESSION_RESC_INFO                 0x2d0UL
        #define HWRM_TF_TBL_TYPE_GET                      0x2daUL
        #define HWRM_TF_TBL_TYPE_SET                      0x2dbUL
        #define HWRM_TF_TBL_TYPE_BULK_GET                 0x2dcUL
@@ -399,6 +416,7 @@ struct cmd_nums {
        #define HWRM_TF_EM_INSERT                         0x2eaUL
        #define HWRM_TF_EM_DELETE                         0x2ebUL
        #define HWRM_TF_EM_HASH_INSERT                    0x2ecUL
+       #define HWRM_TF_EM_MOVE                           0x2edUL
        #define HWRM_TF_TCAM_SET                          0x2f8UL
        #define HWRM_TF_TCAM_GET                          0x2f9UL
        #define HWRM_TF_TCAM_MOVE                         0x2faUL
@@ -427,6 +445,16 @@ struct cmd_nums {
        #define HWRM_DBG_QCAPS                            0xff20UL
        #define HWRM_DBG_QCFG                             0xff21UL
        #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG             0xff22UL
+       #define HWRM_DBG_USEQ_ALLOC                       0xff23UL
+       #define HWRM_DBG_USEQ_FREE                        0xff24UL
+       #define HWRM_DBG_USEQ_FLUSH                       0xff25UL
+       #define HWRM_DBG_USEQ_QCAPS                       0xff26UL
+       #define HWRM_DBG_USEQ_CW_CFG                      0xff27UL
+       #define HWRM_DBG_USEQ_SCHED_CFG                   0xff28UL
+       #define HWRM_DBG_USEQ_RUN                         0xff29UL
+       #define HWRM_DBG_USEQ_DELIVERY_REQ                0xff2aUL
+       #define HWRM_DBG_USEQ_RESP_HDR                    0xff2bUL
+       #define HWRM_NVM_DEFRAG                           0xffecUL
        #define HWRM_NVM_REQ_ARBITRATION                  0xffedUL
        #define HWRM_NVM_FACTORY_DEFAULTS                 0xffeeUL
        #define HWRM_NVM_VALIDATE_OPTION                  0xffefUL
@@ -471,6 +499,7 @@ struct ret_codes {
        #define HWRM_ERR_CODE_HWRM_ERROR                   0xfUL
        #define HWRM_ERR_CODE_BUSY                         0x10UL
        #define HWRM_ERR_CODE_RESOURCE_LOCKED              0x11UL
+       #define HWRM_ERR_CODE_PF_UNAVAILABLE               0x12UL
        #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE    0x8000UL
        #define HWRM_ERR_CODE_UNKNOWN_ERR                  0xfffeUL
        #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED            0xffffUL
@@ -502,8 +531,8 @@ struct hwrm_err_output {
 #define HWRM_VERSION_MAJOR 1
 #define HWRM_VERSION_MINOR 10
 #define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 16
-#define HWRM_VERSION_STR "1.10.2.16"
+#define HWRM_VERSION_RSVD 47
+#define HWRM_VERSION_STR "1.10.2.47"
 
 /* hwrm_ver_get_input (size:192b/24B) */
 struct hwrm_ver_get_input {
@@ -604,7 +633,8 @@ struct hwrm_ver_get_output {
        __le16  roce_fw_build;
        __le16  roce_fw_patch;
        __le16  max_ext_req_len;
-       u8      unused_1[5];
+       __le16  max_req_timeout;
+       u8      unused_1[3];
        u8      valid;
 };
 
@@ -725,7 +755,10 @@ struct hwrm_async_event_cmpl {
        #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE          0x40UL
        #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE    0x41UL
        #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST               0x42UL
-       #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID          0x43UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_MASTER                 0x43UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP              0x44UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT               0x45UL
+       #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID          0x46UL
        #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG               0xfeUL
        #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR                 0xffUL
        #define ASYNC_EVENT_CMPL_EVENT_ID_LAST                      ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -919,6 +952,8 @@ struct hwrm_async_event_cmpl_vf_cfg_change {
        #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
        #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST         ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
        __le32  event_data2;
+       #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_MASK 0xffffUL
+       #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_SFT 0
        u8      opaque_v;
        #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V          0x1UL
        #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
@@ -1074,6 +1109,223 @@ struct hwrm_async_event_cmpl_echo_request {
        __le32  event_data1;
 };
 
+/* hwrm_async_event_cmpl_phc_master (size:128b/16B) */
+struct hwrm_async_event_cmpl_phc_master {
+       __le16  type;
+       #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_MASK            0x3fUL
+       #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_SFT             0
+       #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+       #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_LAST             ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_HWRM_ASYNC_EVENT
+       __le16  event_id;
+       #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_PHC_MASTER 0x43UL
+       #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_LAST      ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_PHC_MASTER
+       __le32  event_data2;
+       #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
+       #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_MASTER_FID_SFT 0
+       #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_SEC_FID_MASK   0xffff0000UL
+       #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_SEC_FID_SFT    16
+       u8      opaque_v;
+       #define ASYNC_EVENT_CMPL_PHC_MASTER_V          0x1UL
+       #define ASYNC_EVENT_CMPL_PHC_MASTER_OPAQUE_MASK 0xfeUL
+       #define ASYNC_EVENT_CMPL_PHC_MASTER_OPAQUE_SFT 1
+       u8      timestamp_lo;
+       __le16  timestamp_hi;
+       __le32  event_data1;
+       #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_MASK         0xfUL
+       #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_SFT          0
+       #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_MASTER     0x1UL
+       #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_SECONDARY  0x2UL
+       #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_FAILOVER   0x3UL
+       #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_LAST          ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_FAILOVER
+};
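+/* Illustrative sketch, not part of the generated interface: given a
+ * completion cmpl of this type, a consumer would decode the master and
+ * secondary FIDs and the failover flags roughly as:
+ *
+ *	u32 data2 = le32_to_cpu(cmpl->event_data2);
+ *	u16 master_fid = data2 &
+ *		ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_MASTER_FID_MASK;
+ *	u16 sec_fid = (data2 &
+ *		ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_SEC_FID_MASK) >>
+ *		ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_SEC_FID_SFT;
+ *	u32 flags = le32_to_cpu(cmpl->event_data1) &
+ *		ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_MASK;
+ */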
+
+/* hwrm_async_event_cmpl_pps_timestamp (size:128b/16B) */
+struct hwrm_async_event_cmpl_pps_timestamp {
+       __le16  type;
+       #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_MASK            0x3fUL
+       #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_SFT             0
+       #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+       #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_LAST             ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT
+       __le16  event_id;
+       #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP 0x44UL
+       #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_LAST         ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP
+       __le32  event_data2;
+       #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE              0x1UL
+       #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL       0x0UL
+       #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL       0x1UL
+       #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_LAST          ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL
+       #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK         0xeUL
+       #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT          1
+       #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK 0xffff0UL
+       #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT 4
+       u8      opaque_v;
+       #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_V          0x1UL
+       #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_MASK 0xfeUL
+       #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_SFT 1
+       u8      timestamp_lo;
+       __le16  timestamp_hi;
+       __le32  event_data1;
+       #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK 0xffffffffUL
+       #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT 0
+};
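+/* Illustrative sketch, not part of the generated interface: the 48-bit PPS
+ * timestamp is split across the completion, bits 47:32 in event_data2 and
+ * bits 31:0 in event_data1, so a consumer would reassemble it as:
+ *
+ *	u64 ts = (u64)((le32_to_cpu(cmpl->event_data2) &
+ *	    ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK) >>
+ *	    ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT) << 32 |
+ *	    le32_to_cpu(cmpl->event_data1);
+ */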
+
+/* hwrm_async_event_cmpl_error_report (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report {
+       __le16  type;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_MASK            0x3fUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_SFT             0
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_LAST             ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT
+       __le16  event_id;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT 0x45UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_LAST        ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT
+       __le32  event_data2;
+       u8      opaque_v;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_V          0x1UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_MASK 0xfeUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_SFT 1
+       u8      timestamp_lo;
+       __le16  timestamp_hi;
+       __le32  event_data1;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_SFT 0
+};
+
+/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
+struct hwrm_async_event_cmpl_hwrm_error {
+       __le16  type;
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK            0x3fUL
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT             0
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST             ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT
+       __le16  event_id;
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST      ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR
+       __le32  event_data2;
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK    0xffUL
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT     0
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING   0x0UL
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL  0x1UL
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL     0x2UL
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST     ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
+       u8      opaque_v;
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_V          0x1UL
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
+       u8      timestamp_lo;
+       __le16  timestamp_hi;
+       __le32  event_data1;
+       #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP     0x1UL
+};
+
+/* hwrm_async_event_cmpl_error_report_base (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_base {
+       __le16  type;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_MASK            0x3fUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_SFT             0
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_LAST             ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT
+       __le16  event_id;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT 0x45UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_LAST        ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT
+       __le32  event_data2;
+       u8      opaque_v;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_V          0x1UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_MASK 0xfeUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_SFT 1
+       u8      timestamp_lo;
+       __le16  timestamp_hi;
+       __le32  event_data1;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK          0xffUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT           0
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED        0x0UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM     0x1UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL  0x2UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM             0x3UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST           ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM
+};
+
+/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_pause_storm {
+       __le16  type;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_MASK            0x3fUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_SFT             0
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_LAST             ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT
+       __le16  event_id;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT 0x45UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_LAST        ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT
+       __le32  event_data2;
+       u8      opaque_v;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_V          0x1UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_MASK 0xfeUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_SFT 1
+       u8      timestamp_lo;
+       __le16  timestamp_hi;
+       __le32  event_data1;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_MASK       0xffUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_SFT        0
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM  0x1UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_LAST        ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM
+};
+
+/* hwrm_async_event_cmpl_error_report_invalid_signal (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_invalid_signal {
+       __le16  type;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_MASK            0x3fUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_SFT             0
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_LAST             ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT
+       __le16  event_id;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT 0x45UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_LAST        ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT
+       __le32  event_data2;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK 0xffUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT 0
+       u8      opaque_v;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_V          0x1UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_MASK 0xfeUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_SFT 1
+       u8      timestamp_lo;
+       __le16  timestamp_hi;
+       __le32  event_data1;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_MASK          0xffUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_SFT           0
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL  0x2UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_LAST           ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL
+};
+
+/* hwrm_async_event_cmpl_error_report_nvm (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_nvm {
+       __le16  type;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_MASK            0x3fUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_SFT             0
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_LAST             ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT
+       __le16  event_id;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT 0x45UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_LAST        ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT
+       __le32  event_data2;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_MASK 0xffffffffUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_SFT 0
+       u8      opaque_v;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_V          0x1UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_MASK 0xfeUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_SFT 1
+       u8      timestamp_lo;
+       __le16  timestamp_hi;
+       __le32  event_data1;
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_MASK     0xffUL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_SFT      0
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR  0x3UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_LAST      ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK   0xff00UL
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_SFT    8
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE    (0x1UL << 8)
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE    (0x2UL << 8)
+       #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_LAST    ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE
+};
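+/* Illustrative sketch, not part of the generated interface: for an NVM error
+ * report, event_data2 carries the failing flash address and event_data1 the
+ * error and operation type, e.g.:
+ *
+ *	u32 err_addr = le32_to_cpu(cmpl->event_data2);
+ *	bool write_err = (le32_to_cpu(cmpl->event_data1) &
+ *	    ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK) ==
+ *	    ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE;
+ */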
+
 /* hwrm_func_reset_input (size:192b/24B) */
 struct hwrm_func_reset_input {
        __le16  req_type;
@@ -1302,7 +1554,7 @@ struct hwrm_func_qcaps_output {
        __le32  max_flow_id;
        __le32  max_hw_ring_grps;
        __le16  max_sp_tx_rings;
-       u8      unused_0[2];
+       __le16  max_msix_vfs;
        __le32  flags_ext;
        #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED                     0x1UL
        #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED                    0x2UL
@@ -1320,6 +1572,14 @@ struct hwrm_func_qcaps_output {
        #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED            0x2000UL
        #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED                  0x4000UL
        #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED                 0x8000UL
+       #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED                     0x10000UL
+       #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED                      0x20000UL
+       #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED                      0x40000UL
+       #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED          0x80000UL
+       #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED                 0x100000UL
+       #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED           0x200000UL
+       #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED                         0x400000UL
+       #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL                        0x800000UL
        u8      max_schqs;
        u8      mpc_chnls_cap;
        #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE         0x1UL
@@ -1342,7 +1602,7 @@ struct hwrm_func_qcfg_input {
        u8      unused_0[6];
 };
 
-/* hwrm_func_qcfg_output (size:768b/96B) */
+/* hwrm_func_qcfg_output (size:832b/104B) */
 struct hwrm_func_qcfg_output {
        __le16  error_code;
        __le16  req_type;
@@ -1366,6 +1626,7 @@ struct hwrm_func_qcfg_output {
        #define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED         0x800UL
        #define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED           0x1000UL
        #define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT                   0x2000UL
+       #define FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV            0x4000UL
        u8      mac_address[6];
        __le16  pci_id;
        __le16  alloc_rsscos_ctx;
@@ -1374,7 +1635,7 @@ struct hwrm_func_qcfg_output {
        __le16  alloc_rx_rings;
        __le16  alloc_l2_ctx;
        __le16  alloc_vnics;
-       __le16  mtu;
+       __le16  admin_mtu;
        __le16  mru;
        __le16  stat_ctx_id;
        u8      port_partition_type;
@@ -1383,6 +1644,7 @@ struct hwrm_func_qcfg_output {
        #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
        #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
        #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+       #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2 0x5UL
        #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
        #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST   FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
        u8      port_pf_cnt;
@@ -1463,11 +1725,35 @@ struct hwrm_func_qcfg_output {
        #define FUNC_QCFG_RESP_MPC_CHNLS_TE_CFA_ENABLED      0x4UL
        #define FUNC_QCFG_RESP_MPC_CHNLS_RE_CFA_ENABLED      0x8UL
        #define FUNC_QCFG_RESP_MPC_CHNLS_PRIMATE_ENABLED     0x10UL
-       u8      unused_2[6];
+       u8      unused_2[3];
+       __le32  partition_min_bw;
+       #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+       #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT              0
+       #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE                     0x10000000UL
+       #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+       #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+       #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_LAST                 FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES
+       #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+       #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT         29
+       #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+       #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST         FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
+       __le32  partition_max_bw;
+       #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+       #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_SFT              0
+       #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE                     0x10000000UL
+       #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+       #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+       #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_LAST                 FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES
+       #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+       #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT         29
+       #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+       #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST         FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
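+       /* Illustrative decode, not generated code: bits 27:0 of each bw word
+        * hold the value, bit 28 selects the bits/bytes scale and bits 31:29
+        * the unit, e.g.:
+        *
+        *	u32 bw = le32_to_cpu(resp->partition_min_bw);
+        *	u32 val = bw & FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK;
+        *	bool bytes_scale = bw & FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE;
+        */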
+       __le16  host_mtu;
+       u8      unused_3;
        u8      valid;
 };
 
-/* hwrm_func_cfg_input (size:768b/96B) */
+/* hwrm_func_cfg_input (size:832b/104B) */
 struct hwrm_func_cfg_input {
        __le16  req_type;
        __le16  cmpl_ring;
@@ -1504,7 +1790,7 @@ struct hwrm_func_cfg_input {
        #define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE             0x20000000UL
        #define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE            0x40000000UL
        __le32  enables;
-       #define FUNC_CFG_REQ_ENABLES_MTU                      0x1UL
+       #define FUNC_CFG_REQ_ENABLES_ADMIN_MTU                0x1UL
        #define FUNC_CFG_REQ_ENABLES_MRU                      0x2UL
        #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS          0x4UL
        #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS           0x8UL
@@ -1530,7 +1816,11 @@ struct hwrm_func_cfg_input {
        #define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT     0x800000UL
        #define FUNC_CFG_REQ_ENABLES_SCHQ_ID                  0x1000000UL
        #define FUNC_CFG_REQ_ENABLES_MPC_CHNLS                0x2000000UL
-       __le16  mtu;
+       #define FUNC_CFG_REQ_ENABLES_PARTITION_MIN_BW         0x4000000UL
+       #define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW         0x8000000UL
+       #define FUNC_CFG_REQ_ENABLES_TPID                     0x10000000UL
+       #define FUNC_CFG_REQ_ENABLES_HOST_MTU                 0x20000000UL
+       __le16  admin_mtu;
        __le16  mru;
        __le16  num_rsscos_ctxs;
        __le16  num_cmpl_rings;
@@ -1615,7 +1905,30 @@ struct hwrm_func_cfg_input {
        #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_DISABLE      0x80UL
        #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_ENABLE      0x100UL
        #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_DISABLE     0x200UL
-       u8      unused_0[4];
+       __le32  partition_min_bw;
+       #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+       #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_SFT              0
+       #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE                     0x10000000UL
+       #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+       #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+       #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_LAST                 FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES
+       #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+       #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT         29
+       #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+       #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST         FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
+       __le32  partition_max_bw;
+       #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+       #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_SFT              0
+       #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE                     0x10000000UL
+       #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+       #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+       #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_LAST                 FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES
+       #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+       #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT         29
+       #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+       #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST         FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
+       __be16  tpid;
+       __le16  host_mtu;
 };
 
 /* hwrm_func_cfg_output (size:128b/16B) */
@@ -1777,14 +2090,15 @@ struct hwrm_func_drv_rgtr_input {
        __le16  target_id;
        __le64  resp_addr;
        __le32  flags;
-       #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE               0x1UL
-       #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE              0x2UL
-       #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE             0x4UL
-       #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE     0x8UL
-       #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT          0x10UL
-       #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT     0x20UL
-       #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT             0x40UL
-       #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT         0x80UL
+       #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE                     0x1UL
+       #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE                    0x2UL
+       #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE                   0x4UL
+       #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE           0x8UL
+       #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT                0x10UL
+       #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT           0x20UL
+       #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT                   0x40UL
+       #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT               0x80UL
+       #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT     0x100UL
        __le32  enables;
        #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE             0x1UL
        #define FUNC_DRV_RGTR_REQ_ENABLES_VER                 0x2UL
@@ -2047,7 +2361,7 @@ struct hwrm_func_backing_store_qcaps_input {
        __le64  resp_addr;
 };
 
-/* hwrm_func_backing_store_qcaps_output (size:704b/88B) */
+/* hwrm_func_backing_store_qcaps_output (size:832b/104B) */
 struct hwrm_func_backing_store_qcaps_output {
        __le16  error_code;
        __le16  req_type;
@@ -2085,6 +2399,8 @@ struct hwrm_func_backing_store_qcaps_output {
        #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_VNIC     0x8UL
        #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_STAT     0x10UL
        #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_MRAV     0x20UL
+       #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_TKC      0x40UL
+       #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_RKC      0x80UL
        u8      qp_init_offset;
        u8      srq_init_offset;
        u8      cq_init_offset;
@@ -2093,7 +2409,13 @@ struct hwrm_func_backing_store_qcaps_output {
        u8      stat_init_offset;
        u8      mrav_init_offset;
        u8      tqm_fp_rings_count_ext;
-       u8      rsvd[5];
+       u8      tkc_init_offset;
+       u8      rkc_init_offset;
+       __le16  tkc_entry_size;
+       __le16  rkc_entry_size;
+       __le32  tkc_max_entries;
+       __le32  rkc_max_entries;
+       u8      rsvd[7];
        u8      valid;
 };
 
@@ -2120,7 +2442,7 @@ struct tqm_fp_ring_cfg {
        __le64  tqm_ring_page_dir;
 };
 
-/* hwrm_func_backing_store_cfg_input (size:2432b/304B) */
+/* hwrm_func_backing_store_cfg_input (size:2688b/336B) */
 struct hwrm_func_backing_store_cfg_input {
        __le16  req_type;
        __le16  cmpl_ring;
@@ -2150,6 +2472,8 @@ struct hwrm_func_backing_store_cfg_input {
        #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8      0x10000UL
        #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9      0x20000UL
        #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10     0x40000UL
+       #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC            0x80000UL
+       #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC            0x100000UL
        u8      qpc_pg_size_qpc_lvl;
        #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK      0xfUL
        #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT       0
@@ -2508,6 +2832,45 @@ struct hwrm_func_backing_store_cfg_input {
        u8      ring10_unused[3];
        __le32  tqm_ring10_num_entries;
        __le64  tqm_ring10_page_dir;
+       __le32  tkc_num_entries;
+       __le32  rkc_num_entries;
+       __le64  tkc_page_dir;
+       __le64  rkc_page_dir;
+       __le16  tkc_entry_size;
+       __le16  rkc_entry_size;
+       u8      tkc_pg_size_tkc_lvl;
+       #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_MASK      0xfUL
+       #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_SFT       0
+       #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_0       0x0UL
+       #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_1       0x1UL
+       #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2       0x2UL
+       #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LAST       FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2
+       #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_MASK  0xf0UL
+       #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_SFT   4
+       #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_4K   (0x0UL << 4)
+       #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8K   (0x1UL << 4)
+       #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_64K  (0x2UL << 4)
+       #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_2M   (0x3UL << 4)
+       #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8M   (0x4UL << 4)
+       #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G   (0x5UL << 4)
+       #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_LAST   FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G
+       u8      rkc_pg_size_rkc_lvl;
+       #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_MASK      0xfUL
+       #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_SFT       0
+       #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_0       0x0UL
+       #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_1       0x1UL
+       #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2       0x2UL
+       #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LAST       FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2
+       #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_MASK  0xf0UL
+       #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_SFT   4
+       #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_4K   (0x0UL << 4)
+       #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8K   (0x1UL << 4)
+       #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_64K  (0x2UL << 4)
+       #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_2M   (0x3UL << 4)
+       #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8M   (0x4UL << 4)
+       #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G   (0x5UL << 4)
+       #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_LAST   FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G
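+       /* Illustrative encoding, not generated code: the level sits in bits
+        * 3:0 and the page size in bits 7:4 of each byte above, e.g.:
+        *
+        *	req->tkc_pg_size_tkc_lvl =
+        *		FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_4K |
+        *		FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_1;
+        */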
+       u8      rsvd[2];
 };
 
 /* hwrm_func_backing_store_cfg_output (size:128b/16B) */
@@ -2634,6 +2997,212 @@ struct hwrm_func_echo_response_output {
        u8      valid;
 };
 
+/* hwrm_func_ptp_pin_qcfg_input (size:192b/24B) */
+struct hwrm_func_ptp_pin_qcfg_input {
+       __le16  req_type;
+       __le16  cmpl_ring;
+       __le16  seq_id;
+       __le16  target_id;
+       __le64  resp_addr;
+       u8      unused_0[8];
+};
+
+/* hwrm_func_ptp_pin_qcfg_output (size:128b/16B) */
+struct hwrm_func_ptp_pin_qcfg_output {
+       __le16  error_code;
+       __le16  req_type;
+       __le16  seq_id;
+       __le16  resp_len;
+       u8      num_pins;
+       u8      state;
+       #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN0_ENABLED     0x1UL
+       #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN1_ENABLED     0x2UL
+       #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN2_ENABLED     0x4UL
+       #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN3_ENABLED     0x8UL
+       u8      pin0_usage;
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_NONE     0x0UL
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_IN   0x1UL
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_OUT  0x2UL
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_IN  0x3UL
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT 0x4UL
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_LAST    FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT
+       u8      pin1_usage;
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_NONE     0x0UL
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_IN   0x1UL
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_OUT  0x2UL
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_IN  0x3UL
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT 0x4UL
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_LAST    FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT
+       u8      pin2_usage;
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE     0x0UL
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN   0x1UL
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT  0x2UL
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN  0x3UL
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST    FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT
+       u8      pin3_usage;
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE     0x0UL
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN   0x1UL
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT  0x2UL
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN  0x3UL
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL
+       #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST    FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT
+       u8      unused_0;
+       u8      valid;
+};
+
+/* hwrm_func_ptp_pin_cfg_input (size:256b/32B) */
+struct hwrm_func_ptp_pin_cfg_input {
+       __le16  req_type;
+       __le16  cmpl_ring;
+       __le16  seq_id;
+       __le16  target_id;
+       __le64  resp_addr;
+       __le32  enables;
+       #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE     0x1UL
+       #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE     0x2UL
+       #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_STATE     0x4UL
+       #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_USAGE     0x8UL
+       #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_STATE     0x10UL
+       #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_USAGE     0x20UL
+       #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_STATE     0x40UL
+       #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_USAGE     0x80UL
+       u8      pin0_state;
+       #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_DISABLED 0x0UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED  0x1UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_LAST    FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED
+       u8      pin0_usage;
+       #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_NONE     0x0UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_IN   0x1UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_OUT  0x2UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_IN  0x3UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT 0x4UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_LAST    FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT
+       u8      pin1_state;
+       #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_DISABLED 0x0UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED  0x1UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_LAST    FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED
+       u8      pin1_usage;
+       #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_NONE     0x0UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_IN   0x1UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_OUT  0x2UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_IN  0x3UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT 0x4UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_LAST    FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT
+       u8      pin2_state;
+       #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_DISABLED 0x0UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED  0x1UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_LAST    FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED
+       u8      pin2_usage;
+       #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE     0x0UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN   0x1UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT  0x2UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN  0x3UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST    FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT
+       u8      pin3_state;
+       #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_DISABLED 0x0UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED  0x1UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_LAST    FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED
+       u8      pin3_usage;
+       #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE     0x0UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN   0x1UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT  0x2UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN  0x3UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL
+       #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST    FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT
+       u8      unused_0[4];
+};
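+/* Illustrative sketch with a hypothetical caller, not part of this patch:
+ * enabling a PPS output on TSIO pin 0 combines the enables bits with the
+ * per-pin state and usage fields:
+ *
+ *	req->enables = cpu_to_le32(FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE |
+ *				   FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE);
+ *	req->pin0_state = FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED;
+ *	req->pin0_usage = FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_OUT;
+ */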
+
+/* hwrm_func_ptp_pin_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_pin_cfg_output {
+       __le16  error_code;
+       __le16  req_type;
+       __le16  seq_id;
+       __le16  resp_len;
+       u8      unused_0[7];
+       u8      valid;
+};
+
+/* hwrm_func_ptp_cfg_input (size:320b/40B) */
+struct hwrm_func_ptp_cfg_input {
+       __le16  req_type;
+       __le16  cmpl_ring;
+       __le16  seq_id;
+       __le16  target_id;
+       __le64  resp_addr;
+       __le16  enables;
+       #define FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT               0x1UL
+       #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_SOURCE     0x2UL
+       #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_PHASE      0x4UL
+       #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD     0x8UL
+       #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP         0x10UL
+       #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE      0x20UL
+       u8      ptp_pps_event;
+       #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_INTERNAL     0x1UL
+       #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_EXTERNAL     0x2UL
+       u8      ptp_freq_adj_dll_source;
+       #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_NONE    0x0UL
+       #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_0  0x1UL
+       #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_1  0x2UL
+       #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_2  0x3UL
+       #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_3  0x4UL
+       #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_0  0x5UL
+       #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_1  0x6UL
+       #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_2  0x7UL
+       #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_3  0x8UL
+       #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID 0xffUL
+       #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_LAST   FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID
+       u8      ptp_freq_adj_dll_phase;
+       #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_NONE 0x0UL
+       #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_4K   0x1UL
+       #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_8K   0x2UL
+       #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M  0x3UL
+       #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M
+       u8      unused_0[3];
+       __le32  ptp_freq_adj_ext_period;
+       __le32  ptp_freq_adj_ext_up;
+       __le32  ptp_freq_adj_ext_phase_lower;
+       __le32  ptp_freq_adj_ext_phase_upper;
+};
+
+/* hwrm_func_ptp_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_cfg_output {
+       __le16  error_code;
+       __le16  req_type;
+       __le16  seq_id;
+       __le16  resp_len;
+       u8      unused_0[7];
+       u8      valid;
+};
+
+/* hwrm_func_ptp_ts_query_input (size:192b/24B) */
+struct hwrm_func_ptp_ts_query_input {
+       __le16  req_type;
+       __le16  cmpl_ring;
+       __le16  seq_id;
+       __le16  target_id;
+       __le64  resp_addr;
+       __le32  flags;
+       #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PPS_TIME     0x1UL
+       #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PTM_TIME     0x2UL
+       u8      unused_0[4];
+};
+
+/* hwrm_func_ptp_ts_query_output (size:320b/40B) */
+struct hwrm_func_ptp_ts_query_output {
+       __le16  error_code;
+       __le16  req_type;
+       __le16  seq_id;
+       __le16  resp_len;
+       __le64  pps_event_ts;
+       __le64  ptm_res_local_ts;
+       __le64  ptm_pmstr_ts;
+       __le32  ptm_mstr_prop_dly;
+       u8      unused_0[3];
+       u8      valid;
+};
+
 /* hwrm_func_drv_if_change_input (size:192b/24B) */
 struct hwrm_func_drv_if_change_input {
        __le16  req_type;
@@ -3156,6 +3725,7 @@ struct hwrm_port_mac_cfg_input {
        #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE     0x80UL
        #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG                  0x100UL
        #define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB               0x200UL
+       #define PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE                  0x400UL
        __le16  port_id;
        u8      ipg;
        u8      lpbk;
@@ -3188,8 +3758,8 @@ struct hwrm_port_mac_cfg_input {
        #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK          0xe0UL
        #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT           5
        u8      unused_0[3];
-       __s32   ptp_freq_adj_ppb;
-       u8      unused_1[4];
+       __le32  ptp_freq_adj_ppb;
+       __le32  ptp_adj_phase;
 };
 
 /* hwrm_port_mac_cfg_output (size:128b/16B) */
@@ -3221,16 +3791,17 @@ struct hwrm_port_mac_ptp_qcfg_input {
        u8      unused_0[6];
 };
 
-/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */
+/* hwrm_port_mac_ptp_qcfg_output (size:704b/88B) */
 struct hwrm_port_mac_ptp_qcfg_output {
        __le16  error_code;
        __le16  req_type;
        __le16  seq_id;
        __le16  resp_len;
        u8      flags;
-       #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS      0x1UL
-       #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS     0x4UL
-       #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS        0x8UL
+       #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS                       0x1UL
+       #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS                      0x4UL
+       #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS                         0x8UL
+       #define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK     0x10UL
        u8      unused_0[3];
        __le32  rx_ts_reg_off_lower;
        __le32  rx_ts_reg_off_upper;
@@ -3247,6 +3818,8 @@ struct hwrm_port_mac_ptp_qcfg_output {
        __le32  tx_ts_reg_off_seq_id;
        __le32  tx_ts_reg_off_fifo;
        __le32  tx_ts_reg_off_granularity;
+       __le32  ts_ref_clock_reg_lower;
+       __le32  ts_ref_clock_reg_upper;
        u8      unused_1[7];
        u8      valid;
 };
@@ -3647,7 +4220,7 @@ struct hwrm_port_lpbk_clr_stats_output {
        u8      valid;
 };
 
-/* hwrm_port_ts_query_input (size:192b/24B) */
+/* hwrm_port_ts_query_input (size:256b/32B) */
 struct hwrm_port_ts_query_input {
        __le16  req_type;
        __le16  cmpl_ring;
@@ -3662,6 +4235,11 @@ struct hwrm_port_ts_query_input {
        #define PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME     0x2UL
        __le16  port_id;
        u8      unused_0[2];
+       __le16  enables;
+       #define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT     0x1UL
+       #define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID         0x2UL
+       __le16  ts_req_timeout;
+       __le32  ptp_seq_id;
 };
 
 /* hwrm_port_ts_query_output (size:192b/24B) */
@@ -4215,7 +4793,8 @@ struct hwrm_queue_qportcfg_output {
        u8      max_configurable_lossless_queues;
        u8      queue_cfg_allowed;
        u8      queue_cfg_info;
-       #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG     0x1UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG             0x1UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_USE_PROFILE_TYPE     0x2UL
        u8      queue_pfcenable_cfg_allowed;
        u8      queue_pri2cos_cfg_allowed;
        u8      queue_cos2bw_cfg_allowed;
@@ -5467,6 +6046,7 @@ struct hwrm_vnic_qcaps_output {
        #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP                      0x400UL
        #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP           0x800UL
        #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP                 0x1000UL
+       #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP            0x2000UL
        __le16  max_aggs_supported;
        u8      unused_1[5];
        u8      valid;
@@ -7224,6 +7804,7 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
        #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED        0x4000UL
        #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE                              0x8000UL
        #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED     0x10000UL
+       #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED                                0x20000UL
        u8      unused_0[3];
        u8      valid;
 };
@@ -7914,11 +8495,14 @@ struct hwrm_temp_monitor_query_output {
        u8      phy_temp;
        u8      om_temp;
        u8      flags;
-       #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE         0x1UL
-       #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE     0x2UL
-       #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT             0x4UL
-       #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE      0x8UL
-       u8      unused_0[3];
+       #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE            0x1UL
+       #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE        0x2UL
+       #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT                0x4UL
+       #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE         0x8UL
+       #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE     0x10UL
+       u8      temp2;
+       u8      phy_temp2;
+       u8      om_temp2;
        u8      valid;
 };
 
@@ -8109,6 +8693,7 @@ struct hwrm_dbg_qcaps_output {
        #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM          0x1UL
        #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR     0x2UL
        #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR      0x4UL
+       #define DBG_QCAPS_RESP_FLAGS_USEQ                   0x8UL
        u8      unused_1[3];
        u8      valid;
 };
@@ -8632,10 +9217,11 @@ struct hwrm_nvm_install_update_output {
 /* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
 struct hwrm_nvm_install_update_cmd_err {
        u8      code;
-       #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN  0x0UL
-       #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
-       #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
-       #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST    NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE
+       #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN       0x0UL
+       #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR      0x1UL
+       #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE      0x2UL
+       #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL
+       #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST         NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK
        u8      unused_0[7];
 };
 
@@ -8876,6 +9462,7 @@ struct fw_status_reg {
        #define FW_STATUS_REG_CRASHDUMP_COMPLETE     0x80000UL
        #define FW_STATUS_REG_SHUTDOWN               0x100000UL
        #define FW_STATUS_REG_CRASHED_NO_MASTER      0x200000UL
+       #define FW_STATUS_REG_RECOVERING             0x400000UL
 };
 
 /* hcomm_status (size:64b/8B) */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
new file mode 100644
index 0000000..f698b6b
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -0,0 +1,473 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2021 Broadcom Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/timecounter.h>
+#include <linux/timekeeping.h>
+#include <linux/ptp_classify.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_ptp.h"
+
+int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id)
+{
+       unsigned int ptp_class;
+       struct ptp_header *hdr;
+
+       ptp_class = ptp_classify_raw(skb);
+
+       switch (ptp_class & PTP_CLASS_VMASK) {
+       case PTP_CLASS_V1:
+       case PTP_CLASS_V2:
+               hdr = ptp_parse_header(skb, ptp_class);
+               if (!hdr)
+                       return -EINVAL;
+
+               *seq_id = ntohs(hdr->sequence_id);
+               return 0;
+       default:
+               return -ERANGE;
+       }
+}
+
+static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info,
+                           const struct timespec64 *ts)
+{
+       struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+                                               ptp_info);
+       u64 ns = timespec64_to_ns(ts);
+
+       spin_lock_bh(&ptp->ptp_lock);
+       timecounter_init(&ptp->tc, &ptp->cc, ns);
+       spin_unlock_bh(&ptp->ptp_lock);
+       return 0;
+}
+
+/* Caller holds ptp_lock */
+static u64 bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts)
+{
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+       u64 ns;
+
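+       /* The free-running PHC is exposed as two 32-bit registers; only the
+        * low-word read is bracketed by the system timestamps so that sts
+        * frames the instant the counter was sampled.
+        */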
+       ptp_read_system_prets(sts);
+       ns = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+       ptp_read_system_postts(sts);
+       ns |= (u64)readl(bp->bar0 + ptp->refclk_mapped_regs[1]) << 32;
+       return ns;
+}
+
+static void bnxt_ptp_get_current_time(struct bnxt *bp)
+{
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+       if (!ptp)
+               return;
+       spin_lock_bh(&ptp->ptp_lock);
+       WRITE_ONCE(ptp->old_time, ptp->current_time);
+       ptp->current_time = bnxt_refclk_read(bp, NULL);
+       spin_unlock_bh(&ptp->ptp_lock);
+}
+
+static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
+{
+       struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_port_ts_query_input req = {0};
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_TS_QUERY, -1, -1);
+       req.flags = cpu_to_le32(flags);
+       if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) ==
+           PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
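+               /* TX queries pass the PTP sequence id of the outstanding skb
+                * and a timeout bounding how long the fw waits for the
+                * timestamp; BNXT_PTP_QTS_* are driver constants defined in
+                * bnxt_ptp.h.
+                */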
+               req.enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
+               req.ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
+               req.ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
+       }
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc)
+               *ts = le64_to_cpu(resp->ptp_msg_ts);
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
+static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info,
+                            struct timespec64 *ts,
+                            struct ptp_system_timestamp *sts)
+{
+       struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+                                               ptp_info);
+       u64 ns, cycles;
+
+       spin_lock_bh(&ptp->ptp_lock);
+       cycles = bnxt_refclk_read(ptp->bp, sts);
+       ns = timecounter_cyc2time(&ptp->tc, cycles);
+       spin_unlock_bh(&ptp->ptp_lock);
+       *ts = ns_to_timespec64(ns);
+
+       return 0;
+}
+
+static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+       struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+                                               ptp_info);
+
+       spin_lock_bh(&ptp->ptp_lock);
+       timecounter_adjtime(&ptp->tc, delta);
+       spin_unlock_bh(&ptp->ptp_lock);
+       return 0;
+}
+
+static int bnxt_ptp_adjfreq(struct ptp_clock_info *ptp_info, s32 ppb)
+{
+       struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+                                               ptp_info);
+       struct hwrm_port_mac_cfg_input req = {0};
+       struct bnxt *bp = ptp->bp;
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
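+       /* ptp_freq_adj_ppb is now __le32 on the wire; the signed ppb is
+        * carried in two's-complement form via cpu_to_le32().
+        */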
+       req.ptp_freq_adj_ppb = cpu_to_le32(ppb);
+       req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               netdev_err(ptp->bp->dev,
+                          "ptp adjfreq failed. rc = %d\n", rc);
+       return rc;
+}
+
+static int bnxt_ptp_enable(struct ptp_clock_info *ptp,
+                          struct ptp_clock_request *rq, int on)
+{
+       return -EOPNOTSUPP;
+}
+
+static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
+{
+       struct hwrm_port_mac_cfg_input req = {0};
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+       u32 flags = 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
+       if (ptp->rx_filter)
+               flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
+       else
+               flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
+       if (ptp->tx_tstamp_en)
+               flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
+       else
+               flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
+       req.flags = cpu_to_le32(flags);
+       req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
+       req.rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
+
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct hwtstamp_config stmpconf;
+       struct bnxt_ptp_cfg *ptp;
+       u16 old_rxctl;
+       int old_rx_filter, rc;
+       u8 old_tx_tstamp_en;
+
+       ptp = bp->ptp_cfg;
+       if (!ptp)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
+               return -EFAULT;
+
+       if (stmpconf.flags)
+               return -EINVAL;
+
+       if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
+           stmpconf.tx_type != HWTSTAMP_TX_OFF)
+               return -ERANGE;
+
+       old_rx_filter = ptp->rx_filter;
+       old_rxctl = ptp->rxctl;
+       old_tx_tstamp_en = ptp->tx_tstamp_en;
+       switch (stmpconf.rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               ptp->rxctl = 0;
+               ptp->rx_filter = HWTSTAMP_FILTER_NONE;
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+               ptp->rxctl = BNXT_PTP_MSG_EVENTS;
+               ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+               ptp->rxctl = BNXT_PTP_MSG_SYNC;
+               ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+               ptp->rxctl = BNXT_PTP_MSG_DELAY_REQ;
+               ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       if (stmpconf.tx_type == HWTSTAMP_TX_ON)
+               ptp->tx_tstamp_en = 1;
+       else
+               ptp->tx_tstamp_en = 0;
+
+       rc = bnxt_hwrm_ptp_cfg(bp);
+       if (rc)
+               goto ts_set_err;
+
+       stmpconf.rx_filter = ptp->rx_filter;
+       return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
+               -EFAULT : 0;
+
+ts_set_err:
+       ptp->rx_filter = old_rx_filter;
+       ptp->rxctl = old_rxctl;
+       ptp->tx_tstamp_en = old_tx_tstamp_en;
+       return rc;
+}
+
+int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       struct hwtstamp_config stmpconf;
+       struct bnxt_ptp_cfg *ptp;
+
+       ptp = bp->ptp_cfg;
+       if (!ptp)
+               return -EOPNOTSUPP;
+
+       stmpconf.flags = 0;
+       stmpconf.tx_type = ptp->tx_tstamp_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+
+       stmpconf.rx_filter = ptp->rx_filter;
+       return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
+               -EFAULT : 0;
+}
+
+static int bnxt_map_regs(struct bnxt *bp, u32 *reg_arr, int count, int reg_win)
+{
+       u32 reg_base = *reg_arr & BNXT_GRC_BASE_MASK;
+       u32 win_off;
+       int i;
+
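+       /* All registers must fall within one GRC page; writing the page base
+        * into the selected window register maps that page into BAR0 at the
+        * window's fixed offset.
+        */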
+       for (i = 0; i < count; i++) {
+               if ((reg_arr[i] & BNXT_GRC_BASE_MASK) != reg_base)
+                       return -ERANGE;
+       }
+       win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
+       writel(reg_base, bp->bar0 + win_off);
+       return 0;
+}
+
+static int bnxt_map_ptp_regs(struct bnxt *bp)
+{
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+       u32 *reg_arr;
+       int rc, i;
+
+       reg_arr = ptp->refclk_regs;
+       if (bp->flags & BNXT_FLAG_CHIP_P5) {
+               rc = bnxt_map_regs(bp, reg_arr, 2, BNXT_PTP_GRC_WIN);
+               if (rc)
+                       return rc;
+               for (i = 0; i < 2; i++)
+                       ptp->refclk_mapped_regs[i] = BNXT_PTP_GRC_WIN_BASE +
+                               (ptp->refclk_regs[i] & BNXT_GRC_OFFSET_MASK);
+               return 0;
+       }
+       return -ENODEV;
+}
+
+static void bnxt_unmap_ptp_regs(struct bnxt *bp)
+{
+       writel(0, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
+                 (BNXT_PTP_GRC_WIN - 1) * 4);
+}
+
+static u64 bnxt_cc_read(const struct cyclecounter *cc)
+{
+       struct bnxt_ptp_cfg *ptp = container_of(cc, struct bnxt_ptp_cfg, cc);
+
+       return bnxt_refclk_read(ptp->bp, NULL);
+}
+
+static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
+{
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+       struct skb_shared_hwtstamps timestamp;
+       u64 ts = 0, ns = 0;
+       int rc;
+
+       rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts);
+       if (!rc) {
+               memset(&timestamp, 0, sizeof(timestamp));
+               spin_lock_bh(&ptp->ptp_lock);
+               ns = timecounter_cyc2time(&ptp->tc, ts);
+               spin_unlock_bh(&ptp->ptp_lock);
+               timestamp.hwtstamp = ns_to_ktime(ns);
+               skb_tstamp_tx(ptp->tx_skb, &timestamp);
+       } else {
+               netdev_err(bp->dev, "TS query for TX timer failed rc = %x\n",
+                          rc);
+       }
+
+       dev_kfree_skb_any(ptp->tx_skb);
+       ptp->tx_skb = NULL;
+       atomic_inc(&ptp->tx_avail);
+}
+
+static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
+{
+       struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+                                               ptp_info);
+       unsigned long now = jiffies;
+       struct bnxt *bp = ptp->bp;
+
+       if (ptp->tx_skb)
+               bnxt_stamp_tx_skb(bp, ptp->tx_skb);
+
+       if (!time_after_eq(now, ptp->next_period))
+               return ptp->next_period - now;
+
+       bnxt_ptp_get_current_time(bp);
+       ptp->next_period = now + HZ;
+       return HZ;
+}
+
+int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb)
+{
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+       if (ptp->tx_skb) {
+               netdev_err(bp->dev, "deferring skb:one SKB is still outstanding\n");
+               return -EBUSY;
+       }
+       ptp->tx_skb = skb;
+       ptp_schedule_worker(ptp->ptp_clock, 0);
+       return 0;
+}
+
+int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
+{
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+       u64 time;
+
+       if (!ptp)
+               return -ENODEV;
+
+       BNXT_READ_TIME64(ptp, time, ptp->old_time);
+       *ts = (time & BNXT_HI_TIMER_MASK) | pkt_ts;
+       if (pkt_ts < (time & BNXT_LO_TIMER_MASK))
+               *ts += BNXT_LO_TIMER_MASK + 1;
+
+       return 0;
+}
+
+void bnxt_ptp_start(struct bnxt *bp)
+{
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+       if (!ptp)
+               return;
+
+       if (bp->flags & BNXT_FLAG_CHIP_P5) {
+               spin_lock_bh(&ptp->ptp_lock);
+               ptp->current_time = bnxt_refclk_read(bp, NULL);
+               WRITE_ONCE(ptp->old_time, ptp->current_time);
+               spin_unlock_bh(&ptp->ptp_lock);
+               ptp_schedule_worker(ptp->ptp_clock, 0);
+       }
+}
+
+static const struct ptp_clock_info bnxt_ptp_caps = {
+       .owner          = THIS_MODULE,
+       .name           = "bnxt clock",
+       .max_adj        = BNXT_MAX_PHC_DRIFT,
+       .n_alarm        = 0,
+       .n_ext_ts       = 0,
+       .n_per_out      = 0,
+       .n_pins         = 0,
+       .pps            = 0,
+       .adjfreq        = bnxt_ptp_adjfreq,
+       .adjtime        = bnxt_ptp_adjtime,
+       .do_aux_work    = bnxt_ptp_ts_aux_work,
+       .gettimex64     = bnxt_ptp_gettimex,
+       .settime64      = bnxt_ptp_settime,
+       .enable         = bnxt_ptp_enable,
+};
+
+int bnxt_ptp_init(struct bnxt *bp)
+{
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+       int rc;
+
+       if (!ptp)
+               return 0;
+
+       rc = bnxt_map_ptp_regs(bp);
+       if (rc)
+               return rc;
+
+       atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
+       spin_lock_init(&ptp->ptp_lock);
+
+       memset(&ptp->cc, 0, sizeof(ptp->cc));
+       ptp->cc.read = bnxt_cc_read;
+       ptp->cc.mask = CYCLECOUNTER_MASK(48);
+       ptp->cc.shift = 0;
+       ptp->cc.mult = 1;
+
+       timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+
+       ptp->ptp_info = bnxt_ptp_caps;
+       ptp->ptp_clock = ptp_clock_register(&ptp->ptp_info, &bp->pdev->dev);
+       if (IS_ERR(ptp->ptp_clock)) {
+               int err = PTR_ERR(ptp->ptp_clock);
+
+               ptp->ptp_clock = NULL;
+               bnxt_unmap_ptp_regs(bp);
+               return err;
+       }
+
+       return 0;
+}
+
+void bnxt_ptp_clear(struct bnxt *bp)
+{
+       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+       if (!ptp)
+               return;
+
+       if (ptp->ptp_clock)
+               ptp_clock_unregister(ptp->ptp_clock);
+
+       ptp->ptp_clock = NULL;
+       if (ptp->tx_skb) {
+               dev_kfree_skb_any(ptp->tx_skb);
+               ptp->tx_skb = NULL;
+       }
+       bnxt_unmap_ptp_regs(bp);
+}
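
For reference, a stand-alone sketch of the 48-bit timestamp reconstruction
bnxt_get_rx_ts_p5() performs above: the RX completion carries only the low
32 bits, so the driver splices them onto the upper bits of a periodically
cached PHC snapshot and corrects for one low-word rollover. Illustrative
userspace C, not part of the patch:

#include <stdint.h>
#include <stdio.h>

#define LO_MASK 0x0000ffffffffULL	/* low 32 bits stamped on the packet */
#define HI_MASK 0xffff00000000ULL	/* high 16 bits from the cached time */

static uint64_t reconstruct_ts(uint64_t cached_time, uint32_t pkt_ts)
{
	uint64_t ts = (cached_time & HI_MASK) | pkt_ts;

	if (pkt_ts < (cached_time & LO_MASK))	/* low word wrapped after the snapshot */
		ts += LO_MASK + 1;
	return ts;
}

int main(void)
{
	uint64_t snap = 0x0001fffffff0ULL;	/* snapshot taken just before a rollover */

	printf("0x%012llx\n", (unsigned long long)reconstruct_ts(snap, 0x10));
	return 0;
}
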
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
new file mode 100644
index 0000000..6b62457
--- /dev/null
@@ -0,0 +1,81 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2021 Broadcom Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_PTP_H
+#define BNXT_PTP_H
+
+#define BNXT_PTP_GRC_WIN       5
+#define BNXT_PTP_GRC_WIN_BASE  0x5000
+
+#define BNXT_MAX_PHC_DRIFT     31000000
+#define BNXT_LO_TIMER_MASK     0x0000ffffffffUL
+#define BNXT_HI_TIMER_MASK     0xffff00000000UL
+
+#define BNXT_PTP_QTS_TIMEOUT   1000
+#define BNXT_PTP_QTS_TX_ENABLES        (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID | \
+                                PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT)
+
+struct bnxt_ptp_cfg {
+       struct ptp_clock_info   ptp_info;
+       struct ptp_clock        *ptp_clock;
+       struct cyclecounter     cc;
+       struct timecounter      tc;
+       /* serialize timecounter access */
+       spinlock_t              ptp_lock;
+       struct sk_buff          *tx_skb;
+       u64                     current_time;
+       u64                     old_time;
+       unsigned long           next_period;
+       u16                     tx_seqid;
+       struct bnxt             *bp;
+       atomic_t                tx_avail;
+#define BNXT_MAX_TX_TS 1
+       u16                     rxctl;
+#define BNXT_PTP_MSG_SYNC                      (1 << 0)
+#define BNXT_PTP_MSG_DELAY_REQ                 (1 << 1)
+#define BNXT_PTP_MSG_PDELAY_REQ                (1 << 2)
+#define BNXT_PTP_MSG_PDELAY_RESP               (1 << 3)
+#define BNXT_PTP_MSG_FOLLOW_UP                 (1 << 8)
+#define BNXT_PTP_MSG_DELAY_RESP                (1 << 9)
+#define BNXT_PTP_MSG_PDELAY_RESP_FOLLOW_UP     (1 << 10)
+#define BNXT_PTP_MSG_ANNOUNCE                  (1 << 11)
+#define BNXT_PTP_MSG_SIGNALING                 (1 << 12)
+#define BNXT_PTP_MSG_MANAGEMENT                (1 << 13)
+#define BNXT_PTP_MSG_EVENTS            (BNXT_PTP_MSG_SYNC |            \
+                                        BNXT_PTP_MSG_DELAY_REQ |       \
+                                        BNXT_PTP_MSG_PDELAY_REQ |      \
+                                        BNXT_PTP_MSG_PDELAY_RESP)
+       u8                      tx_tstamp_en:1;
+       int                     rx_filter;
+
+       u32                     refclk_regs[2];
+       u32                     refclk_mapped_regs[2];
+};
+
+#if BITS_PER_LONG == 32
+#define BNXT_READ_TIME64(ptp, dst, src)                \
+do {                                           \
+       spin_lock_bh(&(ptp)->ptp_lock);         \
+       (dst) = (src);                          \
+       spin_unlock_bh(&(ptp)->ptp_lock);       \
+} while (0)
+#else
+#define BNXT_READ_TIME64(ptp, dst, src)                \
+       ((dst) = READ_ONCE(src))
+#endif
+
+int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id);
+int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
+int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
+int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
+int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
+void bnxt_ptp_start(struct bnxt *bp);
+int bnxt_ptp_init(struct bnxt *bp);
+void bnxt_ptp_clear(struct bnxt *bp);
+#endif
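
BNXT_READ_TIME64() above takes the spinlock only on 32-bit builds, because a
64-bit load there is two separate word accesses and can observe a concurrent
writer half-way ("torn"); on 64-bit a single READ_ONCE() load suffices. A
userspace analogue of the same pattern (illustrative only; a pthread mutex
stands in for the driver's ptp_lock):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t time_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t shared_time;

static uint64_t read_time64(void)
{
#if UINTPTR_MAX == 0xffffffffu		/* 32-bit: two word loads, lock them */
	pthread_mutex_lock(&time_lock);
	uint64_t v = shared_time;
	pthread_mutex_unlock(&time_lock);
	return v;
#else					/* 64-bit: one load is already atomic */
	return *(volatile uint64_t *)&shared_time;
#endif
}

int main(void)
{
	shared_time = 0x123456789abcULL;
	printf("0x%llx\n", (unsigned long long)read_time64());
	return 0;
}
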
index eb00a21..7fa881e 100644
@@ -632,7 +632,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
        vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
        vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
 
-       req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
+       req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU |
                                  FUNC_CFG_REQ_ENABLES_MRU |
                                  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
@@ -645,7 +645,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 
        mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
        req.mru = cpu_to_le16(mtu);
-       req.mtu = cpu_to_le16(mtu);
+       req.admin_mtu = cpu_to_le16(mtu);
 
        req.num_rsscos_ctxs = cpu_to_le16(1);
        req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
index fcca023..41f7f07 100644
@@ -4296,3 +4296,4 @@ MODULE_AUTHOR("Broadcom Corporation");
 MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
 MODULE_ALIAS("platform:bcmgenet");
 MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: mdio-bcm-unimac");
index 61ea3ec..83ed10a 100644
@@ -1337,13 +1337,27 @@ static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
                return ret;
        }
 
-       spin_lock_bh(&adap->win0_lock);
+       /* We have to RESET the chip/firmware because we need the
+        * chip in an uninitialized state for loading a new PHY image.
+        * Otherwise, the running firmware will only store the PHY
+        * image in local RAM, which will be lost after the next reset.
+        */
+       ret = t4_fw_reset(adap, adap->mbox, PIORSTMODE_F | PIORST_F);
+       if (ret < 0) {
+               dev_err(adap->pdev_dev,
+                       "Set FW to RESET for flashing PHY FW failed. ret: %d\n",
+                       ret);
+               return ret;
+       }
+
        ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
-       spin_unlock_bh(&adap->win0_lock);
-       if (ret)
-               dev_err(adap->pdev_dev, "Failed to load PHY FW\n");
+       if (ret < 0) {
+               dev_err(adap->pdev_dev, "Failed to load PHY FW. ret: %d\n",
+                       ret);
+               return ret;
+       }
 
-       return ret;
+       return 0;
 }
 
 static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
@@ -1610,16 +1624,14 @@ static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
                                                   u32 ftid)
 {
        struct tid_info *t = &adap->tids;
-       struct filter_entry *f;
 
-       if (ftid < t->nhpftids)
-               f = &adap->tids.hpftid_tab[ftid];
-       else if (ftid < t->nftids)
-               f = &adap->tids.ftid_tab[ftid - t->nhpftids];
-       else
-               f = lookup_tid(&adap->tids, ftid);
+       if (ftid >= t->hpftid_base && ftid < t->hpftid_base + t->nhpftids)
+               return &t->hpftid_tab[ftid - t->hpftid_base];
 
-       return f;
+       if (ftid >= t->ftid_base && ftid < t->ftid_base + t->nftids)
+               return &t->ftid_tab[ftid - t->ftid_base];
+
+       return lookup_tid(t, ftid);
 }
 
 static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
@@ -1826,6 +1838,11 @@ static int cxgb4_ntuple_del_filter(struct net_device *dev,
        filter_id = filter_info->loc_array[cmd->fs.location];
        f = cxgb4_get_filter_entry(adapter, filter_id);
 
+       if (f->fs.prio)
+               filter_id -= adapter->tids.hpftid_base;
+       else if (!f->fs.hash)
+               filter_id -= (adapter->tids.ftid_base - adapter->tids.nhpftids);
+
        ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
        if (ret)
                goto err;
@@ -1885,6 +1902,11 @@ static int cxgb4_ntuple_set_filter(struct net_device *netdev,
 
        filter_info = &adapter->ethtool_filters->port[pi->port_id];
 
+       if (fs.prio)
+               tid += adapter->tids.hpftid_base;
+       else if (!fs.hash)
+               tid += (adapter->tids.ftid_base - adapter->tids.nhpftids);
+
        filter_info->loc_array[cmd->fs.location] = tid;
        set_bit(cmd->fs.location, filter_info->bmap);
        filter_info->in_use++;
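
A simplified model of the index translation these hunks introduce: the
ethtool loc_array now stores an absolute hardware tid, so the region base is
added when an n-tuple filter is installed and subtracted again before the
rule is destroyed. Field names are assumed for illustration and hash filters
are left out:

#include <stdio.h>

struct tid_ranges {
	unsigned int hpftid_base, nhpftids;	/* high-priority filter region */
	unsigned int ftid_base, nftids;		/* normal filter region */
};

/* Install path: turn an ethtool location into an absolute hardware tid. */
static unsigned int loc_to_hw_tid(const struct tid_ranges *t,
				  unsigned int idx, int hi_prio)
{
	return hi_prio ? idx + t->hpftid_base
		       : idx + (t->ftid_base - t->nhpftids);
}

/* Delete path: undo the offset before calling the rule-destroy helper. */
static unsigned int hw_tid_to_loc(const struct tid_ranges *t,
				  unsigned int tid, int hi_prio)
{
	return hi_prio ? tid - t->hpftid_base
		       : tid - (t->ftid_base - t->nhpftids);
}

int main(void)
{
	struct tid_ranges t = { .hpftid_base = 8, .nhpftids = 64,
				.ftid_base = 72, .nftids = 256 };
	unsigned int tid = loc_to_hw_tid(&t, 5, 0);

	printf("tid=%u, back=%u\n", tid, hw_tid_to_loc(&t, tid, 0));
	return 0;
}
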
index 22c9ac9..6260b3b 100644
@@ -198,7 +198,7 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
                                      WORD_MASK, f->fs.nat_lip[3] |
                                      f->fs.nat_lip[2] << 8 |
                                      f->fs.nat_lip[1] << 16 |
-                                     (u64)f->fs.nat_lip[0] << 25, 1);
+                                     (u64)f->fs.nat_lip[0] << 24, 1);
                }
        }
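
The one-character fix above corrects the shift of the most significant NAT
local-IP byte: assembling an IPv4 address from four bytes takes shifts of
0/8/16/24, and shifting by 25 doubles that byte's contribution. A
stand-alone check (illustrative):

#include <stdint.h>
#include <stdio.h>

/* nat_lip[0] is the most significant byte, so it needs a shift of 24. */
static uint32_t ipv4_from_bytes(const uint8_t b[4])
{
	return (uint32_t)b[3] | (uint32_t)b[2] << 8 |
	       (uint32_t)b[1] << 16 | (uint32_t)b[0] << 24;
}

int main(void)
{
	uint8_t ip[4] = {192, 168, 1, 10};

	printf("0x%08x\n", ipv4_from_bytes(ip));	/* 0xc0a8010a */
	return 0;
}
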
 
index 6479cee..9a2b166 100644
@@ -4424,10 +4424,8 @@ static int adap_init0_phy(struct adapter *adap)
 
        /* Load PHY Firmware onto adapter.
         */
-       spin_lock_bh(&adap->win0_lock);
        ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
                             (u8 *)phyf->data, phyf->size);
-       spin_unlock_bh(&adap->win0_lock);
        if (ret < 0)
                dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
                        -ret);
index 9e3ea5f..6606fb8 100644
@@ -3060,16 +3060,19 @@ int t4_read_flash(struct adapter *adapter, unsigned int addr,
  *     @addr: the start address to write
  *     @n: length of data to write in bytes
  *     @data: the data to write
+ *     @byte_oriented: whether to store data as bytes or as words
  *
  *     Writes up to a page of data (256 bytes) to the serial flash starting
  *     at the given address.  All the data must be written to the same page.
+ *     If @byte_oriented is set, the write data is stored as a byte stream
+ *     (i.e. it matches what is on disk), otherwise as big-endian words.
  */
 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
-                         unsigned int n, const u8 *data)
+                         unsigned int n, const u8 *data, bool byte_oriented)
 {
-       int ret;
-       u32 buf[64];
        unsigned int i, c, left, val, offset = addr & 0xff;
+       u32 buf[64];
+       int ret;
 
        if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
                return -EINVAL;
@@ -3080,10 +3083,14 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
            (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
                goto unlock;
 
-       for (left = n; left; left -= c) {
+       for (left = n; left; left -= c, data += c) {
                c = min(left, 4U);
-               for (val = 0, i = 0; i < c; ++i)
-                       val = (val << 8) + *data++;
+               for (val = 0, i = 0; i < c; ++i) {
+                       if (byte_oriented)
+                               val = (val << 8) + data[i];
+                       else
+                               val = (val << 8) + data[c - i - 1];
+               }
 
                ret = sf1_write(adapter, c, c != left, 1, val);
                if (ret)
@@ -3096,7 +3103,8 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
        t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
 
        /* Read the page to verify the write succeeded */
-       ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+       ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
+                           byte_oriented);
        if (ret)
                return ret;
 
@@ -3692,7 +3700,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
         */
        memcpy(first_page, fw_data, SF_PAGE_SIZE);
        ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
-       ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
+       ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, true);
        if (ret)
                goto out;
 
@@ -3700,14 +3708,14 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
        for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
                addr += SF_PAGE_SIZE;
                fw_data += SF_PAGE_SIZE;
-               ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
+               ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, true);
                if (ret)
                        goto out;
        }
 
-       ret = t4_write_flash(adap,
-                            fw_start + offsetof(struct fw_hdr, fw_ver),
-                            sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
+       ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver),
+                            sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver,
+                            true);
 out:
        if (ret)
                dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
@@ -3812,9 +3820,11 @@ int t4_load_phy_fw(struct adapter *adap, int win,
        /* Copy the supplied PHY Firmware image to the adapter memory location
         * allocated by the adapter firmware.
         */
+       spin_lock_bh(&adap->win0_lock);
        ret = t4_memory_rw(adap, win, mtype, maddr,
                           phy_fw_size, (__be32 *)phy_fw_data,
                           T4_MEMORY_WRITE);
+       spin_unlock_bh(&adap->win0_lock);
        if (ret)
                return ret;
 
@@ -10207,7 +10217,7 @@ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
                        n = size - i;
                else
                        n = SF_PAGE_SIZE;
-               ret = t4_write_flash(adap, addr, n, cfg_data);
+               ret = t4_write_flash(adap, addr, n, cfg_data, true);
                if (ret)
                        goto out;
 
@@ -10676,13 +10686,14 @@ int t4_load_boot(struct adapter *adap, u8 *boot_data,
        for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
                addr += SF_PAGE_SIZE;
                boot_data += SF_PAGE_SIZE;
-               ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data);
+               ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data,
+                                    false);
                if (ret)
                        goto out;
        }
 
        ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
-                            (const u8 *)header);
+                            (const u8 *)header, false);
 
 out:
        if (ret)
@@ -10757,7 +10768,7 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
        for (i = 0; i < size; i += SF_PAGE_SIZE) {
                n = min_t(u32, size - i, SF_PAGE_SIZE);
 
-               ret = t4_write_flash(adap, addr, n, cfg_data);
+               ret = t4_write_flash(adap, addr, n, cfg_data, false);
                if (ret)
                        goto out;
 
@@ -10769,7 +10780,8 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
        for (i = 0; i < npad; i++) {
                u8 data = 0;
 
-               ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data);
+               ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data,
+                                    false);
                if (ret)
                        goto out;
        }
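
A stand-alone sketch of the two packings selected by the new byte_oriented
parameter of t4_write_flash() above, using the same inner loop: one mode
keeps the on-disk byte order within each 32-bit word, the other reverses it
(big-endian words). Illustrative userspace C:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_word(const uint8_t *data, unsigned int c,
			  int byte_oriented)
{
	uint32_t val = 0;
	unsigned int i;

	for (i = 0; i < c; i++)
		val = (val << 8) + (byte_oriented ? data[i] : data[c - i - 1]);
	return val;
}

int main(void)
{
	const uint8_t d[4] = {0x11, 0x22, 0x33, 0x44};

	/* byte stream order vs. big-endian 32-bit words */
	printf("%08x %08x\n", pack_word(d, 4, 1), pack_word(d, 4, 0));
	/* prints: 11223344 44332211 */
	return 0;
}
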
index 46b0dba..7c99217 100644
@@ -576,10 +576,12 @@ static void ec_bhf_remove(struct pci_dev *dev)
        struct ec_bhf_priv *priv = netdev_priv(net_dev);
 
        unregister_netdev(net_dev);
-       free_netdev(net_dev);
 
        pci_iounmap(dev, priv->dma_io);
        pci_iounmap(dev, priv->io);
+
+       free_netdev(net_dev);
+
        pci_release_regions(dev);
        pci_clear_master(dev);
        pci_disable_device(dev);
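
The reorder above matters because the private area returned by netdev_priv()
is carved out of the net_device allocation itself, so priv->dma_io and
priv->io must be unmapped before free_netdev() releases both. A minimal
userspace model of that layout (simplified; the kernel adds alignment
padding):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy layout: the priv area lives at the tail of the net_device block. */
struct net_device_model {
	char hdr[64];		/* stands in for struct net_device */
	char priv[];		/* what netdev_priv() hands back */
};

int main(void)
{
	struct net_device_model *nd = malloc(sizeof(*nd) + 32);
	void *priv = nd->priv;			/* == netdev_priv(nd) */

	printf("priv sits %zu bytes into the same allocation\n",
	       offsetof(struct net_device_model, priv));
	free(nd);			/* == free_netdev(): frees priv too */
	(void)priv;	/* any dereference past this point is a use-after-free */
	return 0;
}
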
index b6eba29..7968568 100644
@@ -5897,6 +5897,7 @@ drv_cleanup:
 unmap_bars:
        be_unmap_pci_bars(adapter);
 free_netdev:
+       pci_disable_pcie_error_reporting(pdev);
        free_netdev(netdev);
 rel_reg:
        pci_release_regions(pdev);
index 05de37c..f3d12d0 100644
@@ -1625,7 +1625,7 @@ static int dpaa2_switch_port_bridge_flags(struct net_device *netdev,
        return 0;
 }
 
-static int dpaa2_switch_port_attr_set(struct net_device *netdev,
+static int dpaa2_switch_port_attr_set(struct net_device *netdev, const void *ctx,
                                      const struct switchdev_attr *attr,
                                      struct netlink_ext_ack *extack)
 {
index 0602d5d..2e002e4 100644
@@ -467,6 +467,11 @@ struct bufdesc_ex {
  */
 #define FEC_QUIRK_NO_HARD_RESET                (1 << 18)
 
+/* The i.MX6SX ENET IP supports multiple queues (3 queues); use this quirk to
+ * represent this ENET IP.
+ */
+#define FEC_QUIRK_HAS_MULTI_QUEUES     (1 << 19)
+
 struct bufdesc_prop {
        int qid;
        /* Address of Rx and Tx buffers */
index ad82cff..8aea707 100644
@@ -76,6 +76,8 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
 
 #define DRIVER_NAME    "fec"
 
+static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
+
 /* Pause frame field and FIFO threshold */
 #define FEC_ENET_FCE   (1 << 5)
 #define FEC_ENET_RSEM_V        0x84
@@ -122,7 +124,7 @@ static const struct fec_devinfo fec_imx6x_info = {
                  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
                  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
                  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
-                 FEC_QUIRK_CLEAR_SETUP_MII,
+                 FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES,
 };
 
 static const struct fec_devinfo fec_imx6ul_info = {
@@ -421,6 +423,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
                                estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
                        if (skb->ip_summed == CHECKSUM_PARTIAL)
                                estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+
                        ebdp->cbd_bdu = 0;
                        ebdp->cbd_esc = cpu_to_fec32(estatus);
                }
@@ -954,7 +957,7 @@ fec_restart(struct net_device *ndev)
         * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
         * instead of reset MAC itself.
         */
-       if (fep->quirks & FEC_QUIRK_HAS_AVB ||
+       if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
            ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
                writel(0, fep->hwp + FEC_ECNTRL);
        } else {
@@ -1165,7 +1168,7 @@ fec_stop(struct net_device *ndev)
         * instead of reset MAC itself.
         */
        if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
-               if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+               if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
                        writel(0, fep->hwp + FEC_ECNTRL);
                } else {
                        writel(1, fep->hwp + FEC_ECNTRL);
@@ -2570,7 +2573,7 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
 
        writel(tx_itr, fep->hwp + FEC_TXIC0);
        writel(rx_itr, fep->hwp + FEC_RXIC0);
-       if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+       if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
                writel(tx_itr, fep->hwp + FEC_TXIC1);
                writel(rx_itr, fep->hwp + FEC_RXIC1);
                writel(tx_itr, fep->hwp + FEC_TXIC2);
@@ -3239,10 +3242,40 @@ static int fec_set_features(struct net_device *netdev,
        return 0;
 }
 
+static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb)
+{
+       struct vlan_ethhdr *vhdr;
+       unsigned short vlan_TCI = 0;
+
+       if (skb->protocol == htons(ETH_P_ALL)) {
+               vhdr = (struct vlan_ethhdr *)(skb->data);
+               vlan_TCI = ntohs(vhdr->h_vlan_TCI);
+       }
+
+       return vlan_TCI;
+}
+
+static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
+                                struct net_device *sb_dev)
+{
+       struct fec_enet_private *fep = netdev_priv(ndev);
+       u16 vlan_tag;
+
+       if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
+               return netdev_pick_tx(ndev, skb, NULL);
+
+       vlan_tag = fec_enet_get_raw_vlan_tci(skb);
+       if (!vlan_tag)
+               return vlan_tag;
+
+       return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
+}
+
 static const struct net_device_ops fec_netdev_ops = {
        .ndo_open               = fec_enet_open,
        .ndo_stop               = fec_enet_close,
        .ndo_start_xmit         = fec_enet_start_xmit,
+       .ndo_select_queue       = fec_enet_select_queue,
        .ndo_set_rx_mode        = set_multicast_list,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_tx_timeout         = fec_timeout,
@@ -3371,7 +3404,7 @@ static int fec_enet_init(struct net_device *ndev)
                fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
        }
 
-       if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+       if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
                fep->tx_align = 0;
                fep->rx_align = 0x3f;
        }
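
The new ndo_select_queue hook maps the 802.1Q PCP field, the top three bits
of the VLAN TCI, onto the three hardware queues. A stand-alone sketch of
that lookup, mirroring fec_enet_vlan_pri_to_queue above (illustrative):

#include <stdint.h>
#include <stdio.h>

/* TCI layout: PCP (3 bits) | DEI (1 bit) | VLAN ID (12 bits). */
static const uint16_t pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};

static uint16_t queue_for_tci(uint16_t tci)
{
	return pri_to_queue[tci >> 13];		/* top three bits = PCP */
}

int main(void)
{
	uint16_t tci = (uint16_t)(5u << 13) | 100;	/* PCP 5, VLAN 100 */

	printf("PCP 5 -> queue %u\n", queue_for_tci(tci));	/* queue 2 */
	return 0;
}
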
index 1753807..d71eac7 100644
@@ -215,15 +215,13 @@ static u64 fec_ptp_read(const struct cyclecounter *cc)
 {
        struct fec_enet_private *fep =
                container_of(cc, struct fec_enet_private, cc);
-       const struct platform_device_id *id_entry =
-               platform_get_device_id(fep->pdev);
        u32 tempval;
 
        tempval = readl(fep->hwp + FEC_ATIME_CTRL);
        tempval |= FEC_T_CTRL_CAPTURE;
        writel(tempval, fep->hwp + FEC_ATIME_CTRL);
 
-       if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE)
+       if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
                udelay(1);
 
        return readl(fep->hwp + FEC_ATIME);
@@ -604,6 +602,10 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
        fep->ptp_caps.enable = fec_ptp_enable;
 
        fep->cycle_speed = clk_get_rate(fep->clk_ptp);
+       if (!fep->cycle_speed) {
+               fep->cycle_speed = NSEC_PER_SEC;
+               dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
+       }
        fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
 
        spin_lock_init(&fep->tmreg_lock);
index b8f04d0..8641a00 100644
@@ -17,7 +17,7 @@ if NET_VENDOR_GOOGLE
 
 config GVE
        tristate "Google Virtual NIC (gVNIC) support"
-       depends on PCI_MSI
+       depends on (PCI_MSI && (X86 || CPU_LITTLE_ENDIAN))
        help
          This driver supports Google Virtual NIC (gVNIC)
 
index 3354ce4..b9a6be7 100644
@@ -1,4 +1,4 @@
 # Makefile for the Google virtual Ethernet (gve) driver
 
 obj-$(CONFIG_GVE) += gve.o
-gve-objs := gve_main.o gve_tx.o gve_rx.o gve_ethtool.o gve_adminq.o
+gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o
index daf07c0..1d3188e 100644
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
  * Google virtual Ethernet (gve) driver
  *
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
  */
 
 #ifndef _GVE_H_
@@ -11,7 +11,9 @@
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/u64_stats_sync.h>
+
 #include "gve_desc.h"
+#include "gve_desc_dqo.h"
 
 #ifndef PCI_VENDOR_ID_GOOGLE
 #define PCI_VENDOR_ID_GOOGLE   0x1ae0
 
 #define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
 
+/* PTYPEs are always 10 bits. */
+#define GVE_NUM_PTYPES 1024
+
+#define GVE_RX_BUFFER_SIZE_DQO 2048
+
 /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
 struct gve_rx_desc_queue {
        struct gve_rx_desc *desc_ring; /* the descriptor ring */
@@ -51,7 +58,8 @@ struct gve_rx_desc_queue {
 struct gve_rx_slot_page_info {
        struct page *page;
        void *page_address;
-       u8 page_offset; /* flipped to second half? */
+       u32 page_offset; /* offset to write to in page */
+       int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
        u8 can_flip;
 };
 
@@ -76,17 +84,117 @@ struct gve_rx_data_queue {
 
 struct gve_priv;
 
-/* An RX ring that contains a power-of-two sized desc and data ring. */
+/* RX buffer queue for posting buffers to HW.
+ * Each RX (completion) queue has a corresponding buffer queue.
+ */
+struct gve_rx_buf_queue_dqo {
+       struct gve_rx_desc_dqo *desc_ring;
+       dma_addr_t bus;
+       u32 head; /* Pointer to start cleaning buffers at. */
+       u32 tail; /* Last posted buffer index + 1 */
+       u32 mask; /* Mask for indices to the size of the ring */
+};
+
+/* RX completion queue to receive packets from HW. */
+struct gve_rx_compl_queue_dqo {
+       struct gve_rx_compl_desc_dqo *desc_ring;
+       dma_addr_t bus;
+
+       /* Number of slots which did not have a buffer posted yet. We should not
+        * post more buffers than the queue size to avoid HW overrunning the
+        * queue.
+        */
+       int num_free_slots;
+
+       /* HW uses a "generation bit" to notify SW of new descriptors. When a
+        * descriptor's generation bit is different from the current generation,
+        * that descriptor is ready to be consumed by SW.
+        */
+       u8 cur_gen_bit;
+
+       /* Pointer into desc_ring where the next completion descriptor will be
+        * received.
+        */
+       u32 head;
+       u32 mask; /* Mask for indices to the size of the ring */
+};
+
+/* Stores state for tracking buffers posted to HW */
+struct gve_rx_buf_state_dqo {
+       /* The page posted to HW. */
+       struct gve_rx_slot_page_info page_info;
+
+       /* The DMA address corresponding to `page_info`. */
+       dma_addr_t addr;
+
+       /* Last offset into the page when it only had a single reference, at
+        * which point every other offset is free to be reused.
+        */
+       u32 last_single_ref_offset;
+
+       /* Linked list index to next element in the list, or -1 if none */
+       s16 next;
+};
+
+/* `head` and `tail` are indices into an array, or -1 if empty. */
+struct gve_index_list {
+       s16 head;
+       s16 tail;
+};
+
+/* Contains datapath state used to represent an RX queue. */
 struct gve_rx_ring {
        struct gve_priv *gve;
-       struct gve_rx_desc_queue desc;
-       struct gve_rx_data_queue data;
+       union {
+               /* GQI fields */
+               struct {
+                       struct gve_rx_desc_queue desc;
+                       struct gve_rx_data_queue data;
+
+                       /* threshold for posting new buffs and descs */
+                       u32 db_threshold;
+               };
+
+               /* DQO fields. */
+               struct {
+                       struct gve_rx_buf_queue_dqo bufq;
+                       struct gve_rx_compl_queue_dqo complq;
+
+                       struct gve_rx_buf_state_dqo *buf_states;
+                       u16 num_buf_states;
+
+                       /* Linked list of gve_rx_buf_state_dqo. Index into
+                        * buf_states, or -1 if empty.
+                        */
+                       s16 free_buf_states;
+
+                       /* Linked list of gve_rx_buf_state_dqo. Indexes into
+                        * buf_states, or -1 if empty.
+                        *
+                        * This list contains buf_states which are pointing to
+                        * valid buffers.
+                        *
+                        * We use a FIFO here in order to increase the
+                        * probability that buffers can be reused by increasing
+                        * the time between usages.
+                        */
+                       struct gve_index_list recycled_buf_states;
+
+                       /* Linked list of gve_rx_buf_state_dqo. Indexes into
+                        * buf_states, or -1 if empty.
+                        *
+                        * This list contains buf_states which have buffers
+                        * which cannot be reused yet.
+                        */
+                       struct gve_index_list used_buf_states;
+               } dqo;
+       };
+
        u64 rbytes; /* free-running bytes received */
        u64 rpackets; /* free-running packets received */
        u32 cnt; /* free-running total number of completed packets */
        u32 fill_cnt; /* free-running total number of descs and buffs posted */
        u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
-       u32 db_threshold; /* threshold for posting new buffs and descs */
        u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
        u64 rx_copied_pkt; /* free-running total number of copied packets */
        u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
@@ -97,6 +205,10 @@ struct gve_rx_ring {
        struct gve_queue_resources *q_resources; /* head and tail pointer idx */
        dma_addr_t q_resources_bus; /* dma address for the queue resources */
        struct u64_stats_sync statss; /* sync stats for 32bit archs */
+
+       /* head and tail of skb chain for the current packet or NULL if none */
+       struct sk_buff *skb_head;
+       struct sk_buff *skb_tail;
 };
 
 /* A TX desc ring entry */
@@ -137,23 +249,161 @@ struct gve_tx_fifo {
        struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
 };
 
-/* A TX ring that contains a power-of-two sized desc ring and a FIFO buffer */
+/* TX descriptor for DQO format */
+union gve_tx_desc_dqo {
+       struct gve_tx_pkt_desc_dqo pkt;
+       struct gve_tx_tso_context_desc_dqo tso_ctx;
+       struct gve_tx_general_context_desc_dqo general_ctx;
+};
+
+enum gve_packet_state {
+       /* Packet is in free list, available to be allocated.
+        * This should always be zero since state is not explicitly initialized.
+        */
+       GVE_PACKET_STATE_UNALLOCATED,
+       /* Packet is expecting a regular data completion or miss completion */
+       GVE_PACKET_STATE_PENDING_DATA_COMPL,
+       /* Packet has received a miss completion and is expecting a
+        * re-injection completion.
+        */
+       GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
+       /* No valid completion received within the specified timeout. */
+       GVE_PACKET_STATE_TIMED_OUT_COMPL,
+};
+
+struct gve_tx_pending_packet_dqo {
+       struct sk_buff *skb; /* skb for this packet */
+
+       /* 0th element corresponds to the linear portion of `skb`, should be
+        * unmapped with `dma_unmap_single`.
+        *
+        * All others correspond to `skb`'s frags and should be unmapped with
+        * `dma_unmap_page`.
+        */
+       struct gve_tx_dma_buf bufs[MAX_SKB_FRAGS + 1];
+       u16 num_bufs;
+
+       /* Linked list index to next element in the list, or -1 if none */
+       s16 next;
+
+       /* Linked list index to prev element in the list, or -1 if none.
+        * Used for tracking either outstanding miss completions or prematurely
+        * freed packets.
+        */
+       s16 prev;
+
+       /* Identifies the current state of the packet as defined in
+        * `enum gve_packet_state`.
+        */
+       u8 state;
+
+       /* If packet is an outstanding miss completion, then the packet is
+        * freed if the corresponding re-injection completion is not received
+        * before kernel jiffies exceeds timeout_jiffies.
+        */
+       unsigned long timeout_jiffies;
+};
+
+/* Contains datapath state used to represent a TX queue. */
 struct gve_tx_ring {
        /* Cacheline 0 -- Accessed & dirtied during transmit */
-       struct gve_tx_fifo tx_fifo;
-       u32 req; /* driver tracked head pointer */
-       u32 done; /* driver tracked tail pointer */
+       union {
+               /* GQI fields */
+               struct {
+                       struct gve_tx_fifo tx_fifo;
+                       u32 req; /* driver tracked head pointer */
+                       u32 done; /* driver tracked tail pointer */
+               };
+
+               /* DQO fields. */
+               struct {
+                       /* Linked list of gve_tx_pending_packet_dqo. Index into
+                        * pending_packets, or -1 if empty.
+                        *
+                        * This is a consumer list owned by the TX path. When it
+                        * runs out, the producer list is stolen from the
+                        * completion handling path
+                        * (dqo_compl.free_pending_packets).
+                        */
+                       s16 free_pending_packets;
+
+                       /* Cached value of `dqo_compl.hw_tx_head` */
+                       u32 head;
+                       u32 tail; /* Last posted buffer index + 1 */
+
+                       /* Index of the last descriptor with "report event" bit
+                        * set.
+                        */
+                       u32 last_re_idx;
+               } dqo_tx;
+       };
 
        /* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
-       __be32 last_nic_done ____cacheline_aligned; /* NIC tail pointer */
+       union {
+               /* GQI fields */
+               struct {
+                       /* NIC tail pointer */
+                       __be32 last_nic_done;
+               };
+
+               /* DQO fields. */
+               struct {
+                       u32 head; /* Last read on compl_desc */
+
+                       /* Tracks the current gen bit of compl_q */
+                       u8 cur_gen_bit;
+
+                       /* Linked list of gve_tx_pending_packet_dqo. Index into
+                        * pending_packets, or -1 if empty.
+                        *
+                        * This is the producer list, owned by the completion
+                        * handling path. When the consumer list
+                        * (dqo_tx.free_pending_packets) runs out, this list
+                        * will be stolen.
+                        */
+                       atomic_t free_pending_packets;
+
+                       /* Last TX ring index fetched by HW */
+                       atomic_t hw_tx_head;
+
+                       /* List to track pending packets which received a miss
+                        * completion but not a corresponding reinjection.
+                        */
+                       struct gve_index_list miss_completions;
+
+                       /* List to track pending packets that the driver
+                        * completed without receiving a valid completion
+                        * because they reached a specified timeout.
+                        */
+                       struct gve_index_list timed_out_completions;
+               } dqo_compl;
+       } ____cacheline_aligned;
        u64 pkt_done; /* free-running - total packets completed */
        u64 bytes_done; /* free-running - total bytes completed */
        u64 dropped_pkt; /* free-running - total packets dropped */
        u64 dma_mapping_error; /* count of dma mapping errors */
 
        /* Cacheline 2 -- Read-mostly fields */
-       union gve_tx_desc *desc ____cacheline_aligned;
-       struct gve_tx_buffer_state *info; /* Maps 1:1 to a desc */
+       union {
+               /* GQI fields */
+               struct {
+                       union gve_tx_desc *desc;
+
+                       /* Maps 1:1 to a desc */
+                       struct gve_tx_buffer_state *info;
+               };
+
+               /* DQO fields. */
+               struct {
+                       union gve_tx_desc_dqo *tx_ring;
+                       struct gve_tx_compl_desc *compl_ring;
+
+                       struct gve_tx_pending_packet_dqo *pending_packets;
+                       s16 num_pending_packets;
+
+                       u32 complq_mask; /* complq size is complq_mask + 1 */
+               } dqo;
+       } ____cacheline_aligned;
        struct netdev_queue *netdev_txq;
        struct gve_queue_resources *q_resources; /* head and tail pointer idx */
        struct device *dev;
@@ -167,6 +417,7 @@ struct gve_tx_ring {
        u32 ntfy_id; /* notification block index */
        dma_addr_t bus; /* dma address of the descr ring */
        dma_addr_t q_resources_bus; /* dma address of the queue resources */
+       dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
        struct u64_stats_sync statss; /* sync stats for 32bit archs */
 } ____cacheline_aligned;
 
@@ -194,6 +445,31 @@ struct gve_qpl_config {
        unsigned long *qpl_id_map; /* bitmap of used qpl ids */
 };
 
+struct gve_options_dqo_rda {
+       u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
+       u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
+};
+
+struct gve_ptype {
+       u8 l3_type;  /* `gve_l3_type` in gve_adminq.h */
+       u8 l4_type;  /* `gve_l4_type` in gve_adminq.h */
+};
+
+struct gve_ptype_lut {
+       struct gve_ptype ptypes[GVE_NUM_PTYPES];
+};
+
+/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
+ * when the entire configure_device_resources command is zeroed out and the
+ * queue_format is not specified.
+ */
+enum gve_queue_format {
+       GVE_QUEUE_FORMAT_UNSPECIFIED    = 0x0,
+       GVE_GQI_RDA_FORMAT              = 0x1,
+       GVE_GQI_QPL_FORMAT              = 0x2,
+       GVE_DQO_RDA_FORMAT              = 0x3,
+};
+
 struct gve_priv {
        struct net_device *dev;
        struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
@@ -216,7 +492,6 @@ struct gve_priv {
        u64 num_registered_pages; /* num pages registered with NIC */
        u32 rx_copybreak; /* copy packets smaller than this */
        u16 default_num_queues; /* default num queues to set up */
-       u8 raw_addressing; /* 1 if this dev supports raw addressing, 0 otherwise */
 
        struct gve_queue_config tx_cfg;
        struct gve_queue_config rx_cfg;
@@ -251,6 +526,7 @@ struct gve_priv {
        u32 adminq_set_driver_parameter_cnt;
        u32 adminq_report_stats_cnt;
        u32 adminq_report_link_speed_cnt;
+       u32 adminq_get_ptype_map_cnt;
 
        /* Global stats */
        u32 interface_up_cnt; /* count of times interface turned up since last reset */
@@ -275,6 +551,14 @@ struct gve_priv {
 
        /* Gvnic device link speed from hypervisor. */
        u64 link_speed;
+
+       struct gve_options_dqo_rda options_dqo_rda;
+       struct gve_ptype_lut *ptype_lut_dqo;
+
+       /* Must be a power of two. */
+       int data_buffer_size_dqo;
+
+       enum gve_queue_format queue_format;
 };
 
 enum gve_service_task_flags_bit {
@@ -454,14 +738,20 @@ static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
  */
 static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
 {
-       return priv->raw_addressing ? 0 : priv->tx_cfg.num_queues;
+       if (priv->queue_format != GVE_GQI_QPL_FORMAT)
+               return 0;
+
+       return priv->tx_cfg.num_queues;
 }
 
 /* Returns the number of rx queue page lists
  */
 static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
 {
-       return priv->raw_addressing ? 0 : priv->rx_cfg.num_queues;
+       if (priv->queue_format != GVE_GQI_QPL_FORMAT)
+               return 0;
+
+       return priv->rx_cfg.num_queues;
 }
 
 /* Returns a pointer to the next available tx qpl in the list of qpls
@@ -515,6 +805,12 @@ static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
                return DMA_FROM_DEVICE;
 }
 
+static inline bool gve_is_gqi(struct gve_priv *priv)
+{
+       return priv->queue_format == GVE_GQI_RDA_FORMAT ||
+               priv->queue_format == GVE_GQI_QPL_FORMAT;
+}
+
 /* buffers */
 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
                   struct page **page, dma_addr_t *dma,
@@ -525,14 +821,14 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
 bool gve_tx_poll(struct gve_notify_block *block, int budget);
 int gve_tx_alloc_rings(struct gve_priv *priv);
-void gve_tx_free_rings(struct gve_priv *priv);
+void gve_tx_free_rings_gqi(struct gve_priv *priv);
 __be32 gve_tx_load_event_counter(struct gve_priv *priv,
                                 struct gve_tx_ring *tx);
 /* rx handling */
 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
 bool gve_rx_poll(struct gve_notify_block *block, int budget);
 int gve_rx_alloc_rings(struct gve_priv *priv);
-void gve_rx_free_rings(struct gve_priv *priv);
+void gve_rx_free_rings_gqi(struct gve_priv *priv);
 bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
                       netdev_features_t feat);
 /* Reset */
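
A conceptual model of the generation-bit handshake described in the
gve_rx_compl_queue_dqo comments above: hardware flips the bit it writes each
time it wraps the ring, so software can recognize freshly written
descriptors without reading a hardware head pointer. Names are assumed; this
is not driver code:

#include <stdint.h>
#include <stdio.h>

struct compl_desc {
	uint8_t gen_bit;	/* written by "hardware" on each pass */
	/* ... completion payload ... */
};

struct compl_queue {
	struct compl_desc *ring;
	uint32_t head;
	uint32_t mask;		/* ring size minus one (power of two) */
	uint8_t cur_gen_bit;	/* generation of the previous pass */
};

/* Return the next ready descriptor, or NULL if HW has written nothing new. */
static struct compl_desc *compl_next(struct compl_queue *q)
{
	struct compl_desc *d = &q->ring[q->head & q->mask];

	if (d->gen_bit == q->cur_gen_bit)	/* still from the previous pass */
		return NULL;

	q->head++;
	if (!(q->head & q->mask))		/* wrapped: expect the flipped bit */
		q->cur_gen_bit ^= 1;
	return d;
}

int main(void)
{
	struct compl_desc ring[4] = { {0}, {0}, {0}, {0} };
	struct compl_queue q = { .ring = ring, .mask = 3 };

	ring[0].gen_bit = 1;	/* "hardware" writes one completion */
	printf("first poll: %s\n", compl_next(&q) ? "ready" : "empty");
	printf("second poll: %s\n", compl_next(&q) ? "ready" : "empty");
	return 0;
}
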
index 53864f2..5bb56b4 100644
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
 /* Google virtual Ethernet (gve) driver
  *
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
  */
 
 #include <linux/etherdevice.h>
@@ -18,6 +18,8 @@
 "Expected: length=%d, feature_mask=%x.\n" \
 "Actual: length=%d, feature_mask=%x.\n"
 
+#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver.\n"
+
 static
 struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
                                              struct gve_device_option *option)
@@ -33,28 +35,81 @@ struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *desc
 static
 void gve_parse_device_option(struct gve_priv *priv,
                             struct gve_device_descriptor *device_descriptor,
-                            struct gve_device_option *option)
+                            struct gve_device_option *option,
+                            struct gve_device_option_gqi_rda **dev_op_gqi_rda,
+                            struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
+                            struct gve_device_option_dqo_rda **dev_op_dqo_rda)
 {
+       u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
        u16 option_length = be16_to_cpu(option->option_length);
        u16 option_id = be16_to_cpu(option->option_id);
 
+       /* If the length or feature mask doesn't match, continue without
+        * enabling the feature.
+        */
        switch (option_id) {
-       case GVE_DEV_OPT_ID_RAW_ADDRESSING:
-               /* If the length or feature mask doesn't match,
-                * continue without enabling the feature.
-                */
-               if (option_length != GVE_DEV_OPT_LEN_RAW_ADDRESSING ||
-                   option->feat_mask != cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING)) {
-                       dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, "Raw Addressing",
-                                GVE_DEV_OPT_LEN_RAW_ADDRESSING,
-                                cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING),
-                                option_length, option->feat_mask);
-                       priv->raw_addressing = 0;
-               } else {
-                       dev_info(&priv->pdev->dev,
-                                "Raw addressing device option enabled.\n");
-                       priv->raw_addressing = 1;
+       case GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING:
+               if (option_length != GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING ||
+                   req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING) {
+                       dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+                                "Raw Addressing",
+                                GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING,
+                                GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING,
+                                option_length, req_feat_mask);
+                       break;
+               }
+
+               dev_info(&priv->pdev->dev,
+                        "Gqi raw addressing device option enabled.\n");
+               priv->queue_format = GVE_GQI_RDA_FORMAT;
+               break;
+       case GVE_DEV_OPT_ID_GQI_RDA:
+               if (option_length < sizeof(**dev_op_gqi_rda) ||
+                   req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA) {
+                       dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+                                "GQI RDA", (int)sizeof(**dev_op_gqi_rda),
+                                GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA,
+                                option_length, req_feat_mask);
+                       break;
+               }
+
+               if (option_length > sizeof(**dev_op_gqi_rda)) {
+                       dev_warn(&priv->pdev->dev,
+                                GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI RDA");
+               }
+               *dev_op_gqi_rda = (void *)(option + 1);
+               break;
+       case GVE_DEV_OPT_ID_GQI_QPL:
+               if (option_length < sizeof(**dev_op_gqi_qpl) ||
+                   req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {
+                       dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+                                "GQI QPL", (int)sizeof(**dev_op_gqi_qpl),
+                                GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,
+                                option_length, req_feat_mask);
+                       break;
+               }
+
+               if (option_length > sizeof(**dev_op_gqi_qpl)) {
+                       dev_warn(&priv->pdev->dev,
+                                GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI QPL");
+               }
+               *dev_op_gqi_qpl = (void *)(option + 1);
+               break;
+       case GVE_DEV_OPT_ID_DQO_RDA:
+               if (option_length < sizeof(**dev_op_dqo_rda) ||
+                   req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) {
+                       dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+                                "DQO RDA", (int)sizeof(**dev_op_dqo_rda),
+                                GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA,
+                                option_length, req_feat_mask);
+                       break;
+               }
+
+               if (option_length > sizeof(**dev_op_dqo_rda)) {
+                       dev_warn(&priv->pdev->dev,
+                                GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO RDA");
                }
+               *dev_op_dqo_rda = (void *)(option + 1);
                break;
        default:
                /* If we don't recognize the option just continue
@@ -65,6 +120,39 @@ void gve_parse_device_option(struct gve_priv *priv,
        }
 }
 
+/* Process all device options for a given describe device call. */
+static int
+gve_process_device_options(struct gve_priv *priv,
+                          struct gve_device_descriptor *descriptor,
+                          struct gve_device_option_gqi_rda **dev_op_gqi_rda,
+                          struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
+                          struct gve_device_option_dqo_rda **dev_op_dqo_rda)
+{
+       const int num_options = be16_to_cpu(descriptor->num_device_options);
+       struct gve_device_option *dev_opt;
+       int i;
+
+       /* The options struct directly follows the device descriptor. */
+       dev_opt = (void *)(descriptor + 1);
+       for (i = 0; i < num_options; i++) {
+               struct gve_device_option *next_opt;
+
+               next_opt = gve_get_next_option(descriptor, dev_opt);
+               if (!next_opt) {
+                       dev_err(&priv->dev->dev,
+                               "options exceed device_descriptor's total length.\n");
+                       return -EINVAL;
+               }
+
+               gve_parse_device_option(priv, descriptor, dev_opt,
+                                       dev_op_gqi_rda, dev_op_gqi_qpl,
+                                       dev_op_dqo_rda);
+               dev_opt = next_opt;
+       }
+
+       return 0;
+}
+
 int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
 {
        priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE,
@@ -88,6 +176,7 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
        priv->adminq_set_driver_parameter_cnt = 0;
        priv->adminq_report_stats_cnt = 0;
        priv->adminq_report_link_speed_cnt = 0;
+       priv->adminq_get_ptype_map_cnt = 0;
 
        /* Setup Admin queue with the device */
        iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
@@ -293,6 +382,9 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
        case GVE_ADMINQ_REPORT_LINK_SPEED:
                priv->adminq_report_link_speed_cnt++;
                break;
+       case GVE_ADMINQ_GET_PTYPE_MAP:
+               priv->adminq_get_ptype_map_cnt++;
+               break;
        default:
                dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
        }
@@ -305,7 +397,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
  * The caller is also responsible for making sure there are no commands
  * waiting to be executed.
  */
-static int gve_adminq_execute_cmd(struct gve_priv *priv, union gve_adminq_command *cmd_orig)
+static int gve_adminq_execute_cmd(struct gve_priv *priv,
+                                 union gve_adminq_command *cmd_orig)
 {
        u32 tail, head;
        int err;
@@ -350,6 +443,7 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
                .irq_db_stride = cpu_to_be32(sizeof(priv->ntfy_blocks[0])),
                .ntfy_blk_msix_base_idx =
                                        cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
+               .queue_format = priv->queue_format,
        };
 
        return gve_adminq_execute_cmd(priv, &cmd);
@@ -369,27 +463,32 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
 {
        struct gve_tx_ring *tx = &priv->tx[queue_index];
        union gve_adminq_command cmd;
-       u32 qpl_id;
-       int err;
 
-       qpl_id = priv->raw_addressing ? GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;
        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
        cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
                .queue_id = cpu_to_be32(queue_index),
-               .reserved = 0,
                .queue_resources_addr =
                        cpu_to_be64(tx->q_resources_bus),
                .tx_ring_addr = cpu_to_be64(tx->bus),
-               .queue_page_list_id = cpu_to_be32(qpl_id),
                .ntfy_id = cpu_to_be32(tx->ntfy_id),
        };
 
-       err = gve_adminq_issue_cmd(priv, &cmd);
-       if (err)
-               return err;
+       if (gve_is_gqi(priv)) {
+               u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
+                       GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;
+
+               cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
+       } else {
+               cmd.create_tx_queue.tx_ring_size =
+                       cpu_to_be16(priv->tx_desc_cnt);
+               cmd.create_tx_queue.tx_comp_ring_addr =
+                       cpu_to_be64(tx->complq_bus_dqo);
+               cmd.create_tx_queue.tx_comp_ring_size =
+                       cpu_to_be16(priv->options_dqo_rda.tx_comp_ring_entries);
+       }
 
-       return 0;
+       return gve_adminq_issue_cmd(priv, &cmd);
 }
 
 int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues)
@@ -410,28 +509,41 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
 {
        struct gve_rx_ring *rx = &priv->rx[queue_index];
        union gve_adminq_command cmd;
-       u32 qpl_id;
-       int err;
 
-       qpl_id = priv->raw_addressing ? GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;
        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
        cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
                .queue_id = cpu_to_be32(queue_index),
-               .index = cpu_to_be32(queue_index),
-               .reserved = 0,
                .ntfy_id = cpu_to_be32(rx->ntfy_id),
                .queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
-               .rx_desc_ring_addr = cpu_to_be64(rx->desc.bus),
-               .rx_data_ring_addr = cpu_to_be64(rx->data.data_bus),
-               .queue_page_list_id = cpu_to_be32(qpl_id),
        };
 
-       err = gve_adminq_issue_cmd(priv, &cmd);
-       if (err)
-               return err;
+       if (gve_is_gqi(priv)) {
+               u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
+                       GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;
+
+               cmd.create_rx_queue.rx_desc_ring_addr =
+                       cpu_to_be64(rx->desc.bus);
+               cmd.create_rx_queue.rx_data_ring_addr =
+                       cpu_to_be64(rx->data.data_bus);
+               cmd.create_rx_queue.index = cpu_to_be32(queue_index);
+               cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
+       } else {
+               cmd.create_rx_queue.rx_ring_size =
+                       cpu_to_be16(priv->rx_desc_cnt);
+               cmd.create_rx_queue.rx_desc_ring_addr =
+                       cpu_to_be64(rx->dqo.complq.bus);
+               cmd.create_rx_queue.rx_data_ring_addr =
+                       cpu_to_be64(rx->dqo.bufq.bus);
+               cmd.create_rx_queue.packet_buffer_size =
+                       cpu_to_be16(priv->data_buffer_size_dqo);
+               cmd.create_rx_queue.rx_buff_ring_size =
+                       cpu_to_be16(priv->options_dqo_rda.rx_buff_ring_entries);
+               cmd.create_rx_queue.enable_rsc =
+                       !!(priv->dev->features & NETIF_F_LRO);
+       }
 
-       return 0;
+       return gve_adminq_issue_cmd(priv, &cmd);
 }
 
 int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
@@ -512,17 +624,51 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
        return gve_adminq_kick_and_wait(priv);
 }
 
+static int gve_set_desc_cnt(struct gve_priv *priv,
+                           struct gve_device_descriptor *descriptor)
+{
+       priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
+       if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
+               dev_err(&priv->pdev->dev, "Tx desc count %d too low\n",
+                       priv->tx_desc_cnt);
+               return -EINVAL;
+       }
+       priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
+       if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0])
+           < PAGE_SIZE) {
+               dev_err(&priv->pdev->dev, "Rx desc count %d too low\n",
+                       priv->rx_desc_cnt);
+               return -EINVAL;
+       }
+       return 0;
+}
+
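For scale, a worked instance of the bound above (assuming the usual 4096-byte
PAGE_SIZE and the 16-byte GQI descriptors this driver asserts elsewhere):
4096 / 16 = 256, so a device advertising fewer than 256 entries per ring,
i.e. a ring smaller than one page, is rejected with -EINVAL.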
+static int
+gve_set_desc_cnt_dqo(struct gve_priv *priv,
+                    const struct gve_device_descriptor *descriptor,
+                    const struct gve_device_option_dqo_rda *dev_op_dqo_rda)
+{
+       priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
+       priv->options_dqo_rda.tx_comp_ring_entries =
+               be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);
+       priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
+       priv->options_dqo_rda.rx_buff_ring_entries =
+               be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);
+
+       return 0;
+}
+
 int gve_adminq_describe_device(struct gve_priv *priv)
 {
+       struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
+       struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
+       struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
        struct gve_device_descriptor *descriptor;
-       struct gve_device_option *dev_opt;
        union gve_adminq_command cmd;
        dma_addr_t descriptor_bus;
-       u16 num_options;
        int err = 0;
        u8 *mac;
        u16 mtu;
-       int i;
 
        memset(&cmd, 0, sizeof(cmd));
        descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
@@ -540,21 +686,41 @@ int gve_adminq_describe_device(struct gve_priv *priv)
        if (err)
                goto free_device_descriptor;
 
-       priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
-       if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
-               dev_err(&priv->pdev->dev, "Tx desc count %d too low\n", priv->tx_desc_cnt);
-               err = -EINVAL;
+       err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
+                                        &dev_op_gqi_qpl, &dev_op_dqo_rda);
+       if (err)
                goto free_device_descriptor;
+
+       /* If the GQI_RAW_ADDRESSING device option already set the queue
+        * format to GqiRda, keep it. Otherwise choose the queue format in
+        * priority order: DqoRda, then GqiRda, then GqiQpl, falling back to
+        * GqiQpl when the device advertised no format option.
+        */
+       if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
+               dev_info(&priv->pdev->dev,
+                        "Driver is running with GQI RDA queue format.\n");
+       } else if (dev_op_dqo_rda) {
+               priv->queue_format = GVE_DQO_RDA_FORMAT;
+               dev_info(&priv->pdev->dev,
+                        "Driver is running with DQO RDA queue format.\n");
+       } else if (dev_op_gqi_rda) {
+               priv->queue_format = GVE_GQI_RDA_FORMAT;
+               dev_info(&priv->pdev->dev,
+                        "Driver is running with GQI RDA queue format.\n");
+       } else {
+               priv->queue_format = GVE_GQI_QPL_FORMAT;
+               dev_info(&priv->pdev->dev,
+                        "Driver is running with GQI QPL queue format.\n");
        }
-       priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
-       if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0])
-           < PAGE_SIZE ||
-           priv->rx_desc_cnt * sizeof(priv->rx->data.data_ring[0])
-           < PAGE_SIZE) {
-               dev_err(&priv->pdev->dev, "Rx desc count %d too low\n", priv->rx_desc_cnt);
-               err = -EINVAL;
-               goto free_device_descriptor;
+       if (gve_is_gqi(priv)) {
+               err = gve_set_desc_cnt(priv, descriptor);
+       } else {
+               /* DQO supports LRO. */
+               priv->dev->hw_features |= NETIF_F_LRO;
+               err = gve_set_desc_cnt_dqo(priv, descriptor, dev_op_dqo_rda);
        }
+       if (err)
+               goto free_device_descriptor;
+
        priv->max_registered_pages =
                                be64_to_cpu(descriptor->max_registered_pages);
        mtu = be16_to_cpu(descriptor->mtu);
@@ -570,32 +736,16 @@ int gve_adminq_describe_device(struct gve_priv *priv)
        dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
        priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
        priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl);
-       if (priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
+
+       if (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
                dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
                        priv->rx_data_slot_cnt);
                priv->rx_desc_cnt = priv->rx_data_slot_cnt;
        }
        priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
-       dev_opt = (void *)(descriptor + 1);
-
-       num_options = be16_to_cpu(descriptor->num_device_options);
-       for (i = 0; i < num_options; i++) {
-               struct gve_device_option *next_opt;
-
-               next_opt = gve_get_next_option(descriptor, dev_opt);
-               if (!next_opt) {
-                       dev_err(&priv->dev->dev,
-                               "options exceed device_descriptor's total length.\n");
-                       err = -EINVAL;
-                       goto free_device_descriptor;
-               }
-
-               gve_parse_device_option(priv, descriptor, dev_opt);
-               dev_opt = next_opt;
-       }
 
 free_device_descriptor:
-       dma_free_coherent(&priv->pdev->dev, sizeof(*descriptor), descriptor,
+       dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
                          descriptor_bus);
        return err;
 }
@@ -701,3 +851,41 @@ int gve_adminq_report_link_speed(struct gve_priv *priv)
                          link_speed_region_bus);
        return err;
 }
+
+int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
+                                struct gve_ptype_lut *ptype_lut)
+{
+       struct gve_ptype_map *ptype_map;
+       union gve_adminq_command cmd;
+       dma_addr_t ptype_map_bus;
+       int err = 0;
+       int i;
+
+       memset(&cmd, 0, sizeof(cmd));
+       ptype_map = dma_alloc_coherent(&priv->pdev->dev, sizeof(*ptype_map),
+                                      &ptype_map_bus, GFP_KERNEL);
+       if (!ptype_map)
+               return -ENOMEM;
+
+       cmd.opcode = cpu_to_be32(GVE_ADMINQ_GET_PTYPE_MAP);
+       cmd.get_ptype_map = (struct gve_adminq_get_ptype_map) {
+               .ptype_map_len = cpu_to_be64(sizeof(*ptype_map)),
+               .ptype_map_addr = cpu_to_be64(ptype_map_bus),
+       };
+
+       err = gve_adminq_execute_cmd(priv, &cmd);
+       if (err)
+               goto err;
+
+       /* Populate ptype_lut. */
+       for (i = 0; i < GVE_NUM_PTYPES; i++) {
+               ptype_lut->ptypes[i].l3_type =
+                       ptype_map->ptypes[i].l3_type;
+               ptype_lut->ptypes[i].l4_type =
+                       ptype_map->ptypes[i].l4_type;
+       }
+err:
+       dma_free_coherent(&priv->pdev->dev, sizeof(*ptype_map), ptype_map,
+                         ptype_map_bus);
+       return err;
+}
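Once this helper has filled priv->ptype_lut_dqo, the RX path can resolve the
10-bit packet_type carried in each DQO completion descriptor. A minimal
consumer sketch; the LUT and descriptor fields are taken from this patch,
while demo_rx_set_hash() and its L4-vs-L3 hash policy are illustrative
assumptions, not the driver's code:

static void demo_rx_set_hash(struct sk_buff *skb,
                             const struct gve_priv *priv,
                             const struct gve_rx_compl_desc_dqo *desc)
{
        /* packet_type is a 10-bit field, so it always lands inside the
         * (1 << 10)-entry LUT populated by gve_adminq_get_ptype_map_dqo().
         */
        u8 l4 = priv->ptype_lut_dqo->ptypes[desc->packet_type].l4_type;

        switch (l4) {
        case GVE_L4_TYPE_TCP:
        case GVE_L4_TYPE_UDP:
        case GVE_L4_TYPE_SCTP:
                skb_set_hash(skb, le32_to_cpu(desc->hash), PKT_HASH_TYPE_L4);
                break;
        default:
                skb_set_hash(skb, le32_to_cpu(desc->hash), PKT_HASH_TYPE_L3);
        }
}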
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index d320c2f..47c3d8f 100644
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
  * Google virtual Ethernet (gve) driver
  *
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
  */
 
 #ifndef _GVE_ADMINQ_H
@@ -22,7 +22,8 @@ enum gve_adminq_opcodes {
        GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES = 0x9,
        GVE_ADMINQ_SET_DRIVER_PARAMETER         = 0xB,
        GVE_ADMINQ_REPORT_STATS                 = 0xC,
-       GVE_ADMINQ_REPORT_LINK_SPEED    = 0xD
+       GVE_ADMINQ_REPORT_LINK_SPEED            = 0xD,
+       GVE_ADMINQ_GET_PTYPE_MAP                = 0xE,
 };
 
 /* Admin queue status codes */
@@ -82,14 +83,54 @@ static_assert(sizeof(struct gve_device_descriptor) == 40);
 struct gve_device_option {
        __be16 option_id;
        __be16 option_length;
-       __be32 feat_mask;
+       __be32 required_features_mask;
 };
 
 static_assert(sizeof(struct gve_device_option) == 8);
 
-#define GVE_DEV_OPT_ID_RAW_ADDRESSING 0x1
-#define GVE_DEV_OPT_LEN_RAW_ADDRESSING 0x0
-#define GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING 0x0
+struct gve_device_option_gqi_rda {
+       __be32 supported_features_mask;
+};
+
+static_assert(sizeof(struct gve_device_option_gqi_rda) == 4);
+
+struct gve_device_option_gqi_qpl {
+       __be32 supported_features_mask;
+};
+
+static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4);
+
+struct gve_device_option_dqo_rda {
+       __be32 supported_features_mask;
+       __be16 tx_comp_ring_entries;
+       __be16 rx_buff_ring_entries;
+};
+
+static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);
+
+/* Terminology:
+ *
+ * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
+ *       mapped and read/updated by the device.
+ *
+ * QPL - Queue Page Lists - Driver uses bounce buffers which are DMA mapped with
+ *       the device for read/write and data is copied from/to SKBs.
+ */
+enum gve_dev_opt_id {
+       GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
+       GVE_DEV_OPT_ID_GQI_RDA = 0x2,
+       GVE_DEV_OPT_ID_GQI_QPL = 0x3,
+       GVE_DEV_OPT_ID_DQO_RDA = 0x4,
+};
+
+enum gve_dev_opt_req_feat_mask {
+       GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
+       GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
+       GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
+       GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
+};
+
+#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
 
 struct gve_adminq_configure_device_resources {
        __be64 counter_array;
@@ -98,9 +139,11 @@ struct gve_adminq_configure_device_resources {
        __be32 num_irq_dbs;
        __be32 irq_db_stride;
        __be32 ntfy_blk_msix_base_idx;
+       u8 queue_format;
+       u8 padding[7];
 };
 
-static_assert(sizeof(struct gve_adminq_configure_device_resources) == 32);
+static_assert(sizeof(struct gve_adminq_configure_device_resources) == 40);
 
 struct gve_adminq_register_page_list {
        __be32 page_list_id;
@@ -125,9 +168,13 @@ struct gve_adminq_create_tx_queue {
        __be64 tx_ring_addr;
        __be32 queue_page_list_id;
        __be32 ntfy_id;
+       __be64 tx_comp_ring_addr;
+       __be16 tx_ring_size;
+       __be16 tx_comp_ring_size;
+       u8 padding[4];
 };
 
-static_assert(sizeof(struct gve_adminq_create_tx_queue) == 32);
+static_assert(sizeof(struct gve_adminq_create_tx_queue) == 48);
 
 struct gve_adminq_create_rx_queue {
        __be32 queue_id;
@@ -138,10 +185,14 @@ struct gve_adminq_create_rx_queue {
        __be64 rx_desc_ring_addr;
        __be64 rx_data_ring_addr;
        __be32 queue_page_list_id;
-       u8 padding[4];
+       __be16 rx_ring_size;
+       __be16 packet_buffer_size;
+       __be16 rx_buff_ring_size;
+       u8 enable_rsc;
+       u8 padding[5];
 };
 
-static_assert(sizeof(struct gve_adminq_create_rx_queue) == 48);
+static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56);
 
 /* Queue resources that are shared with the device */
 struct gve_queue_resources {
@@ -226,6 +277,41 @@ enum gve_stat_names {
        RX_DROPS_INVALID_CHECKSUM       = 68,
 };
 
+enum gve_l3_type {
+       /* Must be zero so zero initialized LUT is unknown. */
+       GVE_L3_TYPE_UNKNOWN = 0,
+       GVE_L3_TYPE_OTHER,
+       GVE_L3_TYPE_IPV4,
+       GVE_L3_TYPE_IPV6,
+};
+
+enum gve_l4_type {
+       /* Must be zero so zero initialized LUT is unknown. */
+       GVE_L4_TYPE_UNKNOWN = 0,
+       GVE_L4_TYPE_OTHER,
+       GVE_L4_TYPE_TCP,
+       GVE_L4_TYPE_UDP,
+       GVE_L4_TYPE_ICMP,
+       GVE_L4_TYPE_SCTP,
+};
+
+/* These are the control path types for PTYPE; they match the data path
+ * types.
+ */
+struct gve_ptype_entry {
+       u8 l3_type;
+       u8 l4_type;
+};
+
+struct gve_ptype_map {
+       struct gve_ptype_entry ptypes[1 << 10]; /* PTYPES are always 10 bits. */
+};
+
+struct gve_adminq_get_ptype_map {
+       __be64 ptype_map_len;
+       __be64 ptype_map_addr;
+};
+
 union gve_adminq_command {
        struct {
                __be32 opcode;
@@ -243,6 +329,7 @@ union gve_adminq_command {
                        struct gve_adminq_set_driver_parameter set_driver_param;
                        struct gve_adminq_report_stats report_stats;
                        struct gve_adminq_report_link_speed report_link_speed;
+                       struct gve_adminq_get_ptype_map get_ptype_map;
                };
        };
        u8 reserved[64];
@@ -271,4 +358,9 @@ int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
 int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
                            dma_addr_t stats_report_addr, u64 interval);
 int gve_adminq_report_link_speed(struct gve_priv *priv);
+
+struct gve_ptype_lut;
+int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
+                                struct gve_ptype_lut *ptype_lut);
+
 #endif /* _GVE_ADMINQ_H */
diff --git a/drivers/net/ethernet/google/gve/gve_desc_dqo.h b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
new file mode 100644
index 0000000..e8fe9ad
--- /dev/null
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ * Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2021 Google, Inc.
+ */
+
+/* GVE DQO Descriptor formats */
+
+#ifndef _GVE_DESC_DQO_H_
+#define _GVE_DESC_DQO_H_
+
+#include <linux/build_bug.h>
+
+#define GVE_TX_MAX_HDR_SIZE_DQO 255
+#define GVE_TX_MIN_TSO_MSS_DQO 88
+
+#ifndef __LITTLE_ENDIAN_BITFIELD
+#error "Only little endian supported"
+#endif
+
+/* Basic TX descriptor (DTYPE 0x0C) */
+struct gve_tx_pkt_desc_dqo {
+       __le64 buf_addr;
+
+       /* Must be GVE_TX_PKT_DESC_DTYPE_DQO (0xc) */
+       u8 dtype: 5;
+
+       /* Denotes the last descriptor of a packet. */
+       u8 end_of_packet: 1;
+       u8 checksum_offload_enable: 1;
+
+       /* If set, will generate a descriptor completion for this descriptor. */
+       u8 report_event: 1;
+       u8 reserved0;
+       __le16 reserved1;
+
+       /* The TX completion associated with this packet will contain this tag.
+        */
+       __le16 compl_tag;
+       u16 buf_size: 14;
+       u16 reserved2: 2;
+} __packed;
+static_assert(sizeof(struct gve_tx_pkt_desc_dqo) == 16);
+
+#define GVE_TX_PKT_DESC_DTYPE_DQO 0xc
+#define GVE_TX_MAX_BUF_SIZE_DQO ((16 * 1024) - 1)
+
+/* Maximum number of data descriptors allowed per packet, or per-TSO segment. */
+#define GVE_TX_MAX_DATA_DESCS 10
+
+/* Min gap between tail and head to avoid cacheline overlap */
+#define GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP 4
+
+/* "report_event" on TX packet descriptors may only be reported on the last
+ * descriptor of a TX packet, and they must be spaced apart with at least this
+ * value.
+ */
+#define GVE_TX_MIN_RE_INTERVAL 32
+
+struct gve_tx_context_cmd_dtype {
+       u8 dtype: 5;
+       u8 tso: 1;
+       u8 reserved1: 2;
+
+       u8 reserved2;
+};
+
+static_assert(sizeof(struct gve_tx_context_cmd_dtype) == 2);
+
+/* TX Native TSO Context DTYPE (0x05)
+ *
+ * "flex" fields allow the driver to send additional packet context to HW.
+ */
+struct gve_tx_tso_context_desc_dqo {
+       /* The L4 payload bytes that should be segmented. */
+       u32 tso_total_len: 24;
+       u32 flex10: 8;
+
+       /* Max segment size in TSO excluding headers. */
+       u16 mss: 14;
+       u16 reserved: 2;
+
+       u8 header_len; /* Header length to use for TSO offload */
+       u8 flex11;
+       struct gve_tx_context_cmd_dtype cmd_dtype;
+       u8 flex0;
+       u8 flex5;
+       u8 flex6;
+       u8 flex7;
+       u8 flex8;
+       u8 flex9;
+} __packed;
+static_assert(sizeof(struct gve_tx_tso_context_desc_dqo) == 16);
+
+#define GVE_TX_TSO_CTX_DESC_DTYPE_DQO 0x5
+
+/* General context descriptor for sending metadata. */
+struct gve_tx_general_context_desc_dqo {
+       u8 flex4;
+       u8 flex5;
+       u8 flex6;
+       u8 flex7;
+       u8 flex8;
+       u8 flex9;
+       u8 flex10;
+       u8 flex11;
+       struct gve_tx_context_cmd_dtype cmd_dtype;
+       u16 reserved;
+       u8 flex0;
+       u8 flex1;
+       u8 flex2;
+       u8 flex3;
+} __packed;
+static_assert(sizeof(struct gve_tx_general_context_desc_dqo) == 16);
+
+#define GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO 0x4
+
+/* Logical structure of metadata which is packed into context descriptor flex
+ * fields.
+ */
+struct gve_tx_metadata_dqo {
+       union {
+               struct {
+                       u8 version;
+
+                       /* If `skb->l4_hash` is set, this value should be
+                        * derived from `skb->hash`.
+                        *
+                        * A zero value means no l4_hash was associated with the
+                        * skb.
+                        */
+                       u16 path_hash: 15;
+
+                       /* Should be set to 1 if the flow associated with the
+                        * skb had a rehash from the TCP stack.
+                        */
+                       u16 rehash_event: 1;
+               }  __packed;
+               u8 bytes[12];
+       };
+}  __packed;
+static_assert(sizeof(struct gve_tx_metadata_dqo) == 12);
+
+#define GVE_TX_METADATA_VERSION_DQO 0
+
+/* TX completion descriptor */
+struct gve_tx_compl_desc {
+       /* For types 0-4 this is the TX queue ID associated with this
+        * completion.
+        */
+       u16 id: 11;
+
+       /* See: GVE_COMPL_TYPE_DQO* */
+       u16 type: 3;
+       u16 reserved0: 1;
+
+       /* Flipped by HW to notify the descriptor is populated. */
+       u16 generation: 1;
+       union {
+               /* For descriptor completions, this is the last index fetched
+                * by HW + 1.
+                */
+               __le16 tx_head;
+
+               /* For packet completions, this is the completion tag set on the
+                * TX packet descriptors.
+                */
+               __le16 completion_tag;
+       };
+       __le32 reserved1;
+} __packed;
+static_assert(sizeof(struct gve_tx_compl_desc) == 8);
+
+#define GVE_COMPL_TYPE_DQO_PKT 0x2 /* Packet completion */
+#define GVE_COMPL_TYPE_DQO_DESC 0x4 /* Descriptor completion */
+#define GVE_COMPL_TYPE_DQO_MISS 0x1 /* Miss path completion */
+#define GVE_COMPL_TYPE_DQO_REINJECTION 0x3 /* Re-injection completion */
+
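The generation field is the liveness signal for both DQO completion ring
types: hardware writes it with a value that flips on every ring wrap, so
software can detect fresh entries without any device-visible head pointer. A
minimal polling sketch under that assumption (demo_compl_ring and its fields
are invented for illustration; only struct gve_tx_compl_desc comes from this
header):

struct demo_compl_ring {
        struct gve_tx_compl_desc *ring;
        u32 mask;       /* entries - 1; entry count is a power of two */
        u32 head;       /* next slot software will examine */
        u8 expect_gen;  /* generation value that marks a fresh entry */
};

/* Return the next populated completion, or NULL if HW has not written one. */
static struct gve_tx_compl_desc *demo_compl_next(struct demo_compl_ring *r)
{
        struct gve_tx_compl_desc *desc = &r->ring[r->head];

        if (desc->generation != r->expect_gen)
                return NULL;

        dma_rmb();      /* read the payload only after the generation check */

        if (++r->head > r->mask) {
                r->head = 0;
                r->expect_gen ^= 1;     /* generation flips on each wrap */
        }
        return desc;
}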
+/* Descriptor to post buffers to HW on buffer queue. */
+struct gve_rx_desc_dqo {
+       __le16 buf_id; /* ID returned in Rx completion descriptor */
+       __le16 reserved0;
+       __le32 reserved1;
+       __le64 buf_addr; /* DMA address of the buffer */
+       __le64 header_buf_addr;
+       __le64 reserved2;
+} __packed;
+static_assert(sizeof(struct gve_rx_desc_dqo) == 32);
+
+/* Descriptor for HW to notify SW of new packets received on RX queue. */
+struct gve_rx_compl_desc_dqo {
+       /* Must be 1 */
+       u8 rxdid: 4;
+       u8 reserved0: 4;
+
+       /* Packet originated from this system rather than the network. */
+       u8 loopback: 1;
+       /* Set when IPv6 packet contains a destination options header or routing
+        * header.
+        */
+       u8 ipv6_ex_add: 1;
+       /* Invalid packet was received. */
+       u8 rx_error: 1;
+       u8 reserved1: 5;
+
+       u16 packet_type: 10;
+       u16 ip_hdr_err: 1;
+       u16 udp_len_err: 1;
+       u16 raw_cs_invalid: 1;
+       u16 reserved2: 3;
+
+       u16 packet_len: 14;
+       /* Flipped by HW to notify the descriptor is populated. */
+       u16 generation: 1;
+       /* Should be zero. */
+       u16 buffer_queue_id: 1;
+
+       u16 header_len: 10;
+       u16 rsc: 1;
+       u16 split_header: 1;
+       u16 reserved3: 4;
+
+       u8 descriptor_done: 1;
+       u8 end_of_packet: 1;
+       u8 header_buffer_overflow: 1;
+       u8 l3_l4_processed: 1;
+       u8 csum_ip_err: 1;
+       u8 csum_l4_err: 1;
+       u8 csum_external_ip_err: 1;
+       u8 csum_external_udp_err: 1;
+
+       u8 status_error1;
+
+       __le16 reserved5;
+       __le16 buf_id; /* Buffer ID which was sent on the buffer queue. */
+
+       union {
+               /* Packet checksum. */
+               __le16 raw_cs;
+               /* Segment length for RSC packets. */
+               __le16 rsc_seg_len;
+       };
+       __le32 hash;
+       __le32 reserved6;
+       __le64 reserved7;
+} __packed;
+
+static_assert(sizeof(struct gve_rx_compl_desc_dqo) == 32);
+
+/* Ringing the doorbell too often can hurt performance.
+ *
+ * HW requires this value to be at least 8.
+ */
+#define GVE_RX_BUF_THRESH_DQO 32
+
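A posting loop consistent with both constraints above batches doorbell writes
on GVE_RX_BUF_THRESH_DQO boundaries. Sketch only: post_one_buffer() is a
hypothetical helper and the driver's actual gve_rx_post_buffers_dqo() may
differ; the bufq fields and gve_rx_write_doorbell_dqo() match this patch:

static void demo_post_rx_buffers(struct gve_priv *priv,
                                 struct gve_rx_ring *rx, int num_to_post)
{
        while (num_to_post--) {
                post_one_buffer(rx);    /* hypothetical: fill desc at bufq.tail */
                rx->dqo.bufq.tail = (rx->dqo.bufq.tail + 1) & rx->dqo.bufq.mask;

                /* Ring at most once per GVE_RX_BUF_THRESH_DQO (>= 8) slots. */
                if ((rx->dqo.bufq.tail & (GVE_RX_BUF_THRESH_DQO - 1)) == 0)
                        gve_rx_write_doorbell_dqo(priv, rx->q_num);
        }
}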
+#endif /* _GVE_DESC_DQO_H_ */
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
new file mode 100644
index 0000000..8360423
--- /dev/null
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ * Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2021 Google, Inc.
+ */
+
+#ifndef _GVE_DQO_H_
+#define _GVE_DQO_H_
+
+#include "gve_adminq.h"
+
+#define GVE_ITR_ENABLE_BIT_DQO BIT(0)
+#define GVE_ITR_CLEAR_PBA_BIT_DQO BIT(1)
+#define GVE_ITR_NO_UPDATE_DQO (3 << 3)
+
+#define GVE_ITR_INTERVAL_DQO_SHIFT 5
+#define GVE_ITR_INTERVAL_DQO_MASK ((1 << 12) - 1)
+
+#define GVE_TX_IRQ_RATELIMIT_US_DQO 50
+#define GVE_RX_IRQ_RATELIMIT_US_DQO 20
+
+/* Timeout in seconds to wait for a reinjection completion after receiving
+ * its corresponding miss completion.
+ */
+#define GVE_REINJECT_COMPL_TIMEOUT 1
+
+/* Timeout in seconds to deallocate the completion tag for a packet that was
+ * prematurely freed for not receiving a valid completion. This should be large
+ * enough to rule out the possibility of receiving the corresponding valid
+ * completion after this interval.
+ */
+#define GVE_DEALLOCATE_COMPL_TIMEOUT 60
+
+netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev);
+bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
+int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
+int gve_tx_alloc_rings_dqo(struct gve_priv *priv);
+void gve_tx_free_rings_dqo(struct gve_priv *priv);
+int gve_rx_alloc_rings_dqo(struct gve_priv *priv);
+void gve_rx_free_rings_dqo(struct gve_priv *priv);
+int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+                         struct napi_struct *napi);
+void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx);
+void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx);
+
+static inline void
+gve_tx_put_doorbell_dqo(const struct gve_priv *priv,
+                       const struct gve_queue_resources *q_resources, u32 val)
+{
+       u64 index;
+
+       index = be32_to_cpu(q_resources->db_index);
+       iowrite32(val, &priv->db_bar2[index]);
+}
+
+/* Builds register value to write to DQO IRQ doorbell to enable with specified
+ * ratelimit.
+ */
+static inline u32 gve_set_itr_ratelimit_dqo(u32 ratelimit_us)
+{
+       u32 result = GVE_ITR_ENABLE_BIT_DQO;
+
+       /* Interval has 2us granularity. */
+       ratelimit_us >>= 1;
+
+       ratelimit_us &= GVE_ITR_INTERVAL_DQO_MASK;
+       result |= (ratelimit_us << GVE_ITR_INTERVAL_DQO_SHIFT);
+
+       return result;
+}
+
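Plugging the ratelimit constants defined at the top of this header into
gve_set_itr_ratelimit_dqo() above gives a quick sanity check (worked values
only): the 50 us TX limit becomes 50 >> 1 = 25, then 25 << 5 = 0x320, so the
doorbell value is 0x321 once GVE_ITR_ENABLE_BIT_DQO is OR'd in; the 20 us RX
limit yields 0x141 by the same steps.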
+static inline void
+gve_write_irq_doorbell_dqo(const struct gve_priv *priv,
+                          const struct gve_notify_block *block, u32 val)
+{
+       u32 index = be32_to_cpu(block->irq_db_index);
+
+       iowrite32(val, &priv->db_bar2[index]);
+}
+
+#endif /* _GVE_DQO_H_ */
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 5fb05cf..716e624 100644
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
 /* Google virtual Ethernet (gve) driver
  *
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
  */
 
 #include <linux/ethtool.h>
@@ -311,8 +311,16 @@ gve_get_ethtool_stats(struct net_device *netdev,
                for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
                        struct gve_tx_ring *tx = &priv->tx[ring];
 
-                       data[i++] = tx->req;
-                       data[i++] = tx->done;
+                       if (gve_is_gqi(priv)) {
+                               data[i++] = tx->req;
+                               data[i++] = tx->done;
+                       } else {
+                               /* DQO doesn't currently support
+                                * posted/completed descriptor counts.
+                                */
+                               data[i++] = 0;
+                               data[i++] = 0;
+                       }
                        do {
                                start =
                                  u64_stats_fetch_begin(&priv->tx[ring].statss);
@@ -453,11 +461,16 @@ static int gve_set_tunable(struct net_device *netdev,
 
        switch (etuna->id) {
        case ETHTOOL_RX_COPYBREAK:
+       {
+               u32 max_copybreak = gve_is_gqi(priv) ?
+                       (PAGE_SIZE / 2) : priv->data_buffer_size_dqo;
+
                len = *(u32 *)value;
-               if (len > PAGE_SIZE / 2)
+               if (len > max_copybreak)
                        return -EINVAL;
                priv->rx_copybreak = len;
                return 0;
+       }
        default:
                return -EOPNOTSUPP;
        }
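For reference, this tunable is driven from userspace through ethtool's
generic tunable interface, e.g. "ethtool --set-tunable <dev> rx-copybreak
256"; on a DQO device the accepted maximum is now the negotiated
data_buffer_size_dqo rather than half a page.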
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index bbc423e..ac4819c 100644
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
 /* Google virtual Ethernet (gve) driver
  *
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
  */
 
 #include <linux/cpumask.h>
@@ -14,6 +14,7 @@
 #include <linux/workqueue.h>
 #include <net/sch_generic.h>
 #include "gve.h"
+#include "gve_dqo.h"
 #include "gve_adminq.h"
 #include "gve_register.h"
 
 const char gve_version_str[] = GVE_VERSION;
 static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
 
+static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct gve_priv *priv = netdev_priv(dev);
+
+       if (gve_is_gqi(priv))
+               return gve_tx(skb, dev);
+       else
+               return gve_tx_dqo(skb, dev);
+}
+
 static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
 {
        struct gve_priv *priv = netdev_priv(dev);
@@ -155,6 +166,15 @@ static irqreturn_t gve_intr(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
+static irqreturn_t gve_intr_dqo(int irq, void *arg)
+{
+       struct gve_notify_block *block = arg;
+
+       /* Interrupts are automatically masked */
+       napi_schedule_irqoff(&block->napi);
+       return IRQ_HANDLED;
+}
+
 static int gve_napi_poll(struct napi_struct *napi, int budget)
 {
        struct gve_notify_block *block;
@@ -191,6 +211,54 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
        return 0;
 }
 
+static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
+{
+       struct gve_notify_block *block =
+               container_of(napi, struct gve_notify_block, napi);
+       struct gve_priv *priv = block->priv;
+       bool reschedule = false;
+       int work_done = 0;
+
+       /* Clear PCI MSI-X Pending Bit Array (PBA)
+        *
+        * This bit is set if an interrupt event occurs while the vector is
+        * masked. If this bit is set and we reenable the interrupt, it will
+        * fire again. Since we're just about to poll the queue state, we don't
+        * need it to fire again.
+        *
+        * Under high softirq load, it's possible that the interrupt condition
+        * is triggered twice before we got the chance to process it.
+        */
+       gve_write_irq_doorbell_dqo(priv, block,
+                                  GVE_ITR_NO_UPDATE_DQO | GVE_ITR_CLEAR_PBA_BIT_DQO);
+
+       if (block->tx)
+               reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+
+       if (block->rx) {
+               work_done = gve_rx_poll_dqo(block, budget);
+               reschedule |= work_done == budget;
+       }
+
+       if (reschedule)
+               return budget;
+
+       if (likely(napi_complete_done(napi, work_done))) {
+               /* Enable interrupts again.
+                *
+                * We don't need to repoll afterwards because HW supports the
+                * PCI MSI-X PBA feature.
+                *
+                * Another interrupt would be triggered if a new event came in
+                * since the last one.
+                */
+               gve_write_irq_doorbell_dqo(priv, block,
+                                          GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO);
+       }
+
+       return work_done;
+}
+
 static int gve_alloc_notify_blocks(struct gve_priv *priv)
 {
        int num_vecs_requested = priv->num_ntfy_blks + 1;
@@ -264,7 +332,8 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
                         name, i);
                block->priv = priv;
                err = request_irq(priv->msix_vectors[msix_idx].vector,
-                                 gve_intr, 0, block->name, block);
+                                 gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
+                                 0, block->name, block);
                if (err) {
                        dev_err(&priv->pdev->dev,
                                "Failed to receive msix vector %d\n", i);
@@ -346,6 +415,22 @@ static int gve_setup_device_resources(struct gve_priv *priv)
                err = -ENXIO;
                goto abort_with_stats_report;
        }
+
+       if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+               priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo),
+                                              GFP_KERNEL);
+               if (!priv->ptype_lut_dqo) {
+                       err = -ENOMEM;
+                       goto abort_with_stats_report;
+               }
+               err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
+               if (err) {
+                       dev_err(&priv->pdev->dev,
+                               "Failed to get ptype map: err=%d\n", err);
+                       goto abort_with_ptype_lut;
+               }
+       }
+
        err = gve_adminq_report_stats(priv, priv->stats_report_len,
                                      priv->stats_report_bus,
                                      GVE_STATS_REPORT_TIMER_PERIOD);
@@ -354,12 +439,17 @@ static int gve_setup_device_resources(struct gve_priv *priv)
                        "Failed to report stats: err=%d\n", err);
        gve_set_device_resources_ok(priv);
        return 0;
+
+abort_with_ptype_lut:
+       kvfree(priv->ptype_lut_dqo);
+       priv->ptype_lut_dqo = NULL;
 abort_with_stats_report:
        gve_free_stats_report(priv);
 abort_with_ntfy_blocks:
        gve_free_notify_blocks(priv);
 abort_with_counter:
        gve_free_counter_array(priv);
+
        return err;
 }
 
@@ -386,17 +476,22 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
                        gve_trigger_reset(priv);
                }
        }
+
+       kvfree(priv->ptype_lut_dqo);
+       priv->ptype_lut_dqo = NULL;
+
        gve_free_counter_array(priv);
        gve_free_notify_blocks(priv);
        gve_free_stats_report(priv);
        gve_clear_device_resources_ok(priv);
 }
 
-static void gve_add_napi(struct gve_priv *priv, int ntfy_idx)
+static void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
+                        int (*gve_poll)(struct napi_struct *, int))
 {
        struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
 
-       netif_napi_add(priv->dev, &block->napi, gve_napi_poll,
+       netif_napi_add(priv->dev, &block->napi, gve_poll,
                       NAPI_POLL_WEIGHT);
 }
 
@@ -476,31 +571,75 @@ static int gve_create_rings(struct gve_priv *priv)
        netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
                  priv->rx_cfg.num_queues);
 
-       /* Rx data ring has been prefilled with packet buffers at queue
-        * allocation time.
-        * Write the doorbell to provide descriptor slots and packet buffers
-        * to the NIC.
-        */
-       for (i = 0; i < priv->rx_cfg.num_queues; i++)
-               gve_rx_write_doorbell(priv, &priv->rx[i]);
+       if (gve_is_gqi(priv)) {
+               /* Rx data ring has been prefilled with packet buffers at queue
+                * allocation time.
+                *
+                * Write the doorbell to provide descriptor slots and packet
+                * buffers to the NIC.
+                */
+               for (i = 0; i < priv->rx_cfg.num_queues; i++)
+                       gve_rx_write_doorbell(priv, &priv->rx[i]);
+       } else {
+               for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+                       /* Post buffers and ring doorbell. */
+                       gve_rx_post_buffers_dqo(&priv->rx[i]);
+               }
+       }
 
        return 0;
 }
 
+static void add_napi_init_sync_stats(struct gve_priv *priv,
+                                    int (*napi_poll)(struct napi_struct *napi,
+                                                     int budget))
+{
+       int i;
+
+       /* Add tx napi & init sync stats */
+       for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+               int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
+
+               u64_stats_init(&priv->tx[i].statss);
+               priv->tx[i].ntfy_id = ntfy_idx;
+               gve_add_napi(priv, ntfy_idx, napi_poll);
+       }
+       /* Add rx napi & init sync stats */
+       for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+               int ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
+
+               u64_stats_init(&priv->rx[i].statss);
+               priv->rx[i].ntfy_id = ntfy_idx;
+               gve_add_napi(priv, ntfy_idx, napi_poll);
+       }
+}
+
+static void gve_tx_free_rings(struct gve_priv *priv)
+{
+       if (gve_is_gqi(priv))
+               gve_tx_free_rings_gqi(priv);
+       else
+               gve_tx_free_rings_dqo(priv);
+}
+
 static int gve_alloc_rings(struct gve_priv *priv)
 {
-       int ntfy_idx;
        int err;
-       int i;
 
        /* Setup tx rings */
        priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx),
                            GFP_KERNEL);
        if (!priv->tx)
                return -ENOMEM;
-       err = gve_tx_alloc_rings(priv);
+
+       if (gve_is_gqi(priv))
+               err = gve_tx_alloc_rings(priv);
+       else
+               err = gve_tx_alloc_rings_dqo(priv);
        if (err)
                goto free_tx;
+
        /* Setup rx rings */
        priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx),
                            GFP_KERNEL);
@@ -508,21 +647,18 @@ static int gve_alloc_rings(struct gve_priv *priv)
                err = -ENOMEM;
                goto free_tx_queue;
        }
-       err = gve_rx_alloc_rings(priv);
+
+       if (gve_is_gqi(priv))
+               err = gve_rx_alloc_rings(priv);
+       else
+               err = gve_rx_alloc_rings_dqo(priv);
        if (err)
                goto free_rx;
-       /* Add tx napi & init sync stats*/
-       for (i = 0; i < priv->tx_cfg.num_queues; i++) {
-               u64_stats_init(&priv->tx[i].statss);
-               ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
-               gve_add_napi(priv, ntfy_idx);
-       }
-       /* Add rx napi  & init sync stats*/
-       for (i = 0; i < priv->rx_cfg.num_queues; i++) {
-               u64_stats_init(&priv->rx[i].statss);
-               ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
-               gve_add_napi(priv, ntfy_idx);
-       }
+
+       if (gve_is_gqi(priv))
+               add_napi_init_sync_stats(priv, gve_napi_poll);
+       else
+               add_napi_init_sync_stats(priv, gve_napi_poll_dqo);
 
        return 0;
 
@@ -560,6 +696,14 @@ static int gve_destroy_rings(struct gve_priv *priv)
        return 0;
 }
 
+static void gve_rx_free_rings(struct gve_priv *priv)
+{
+       if (gve_is_gqi(priv))
+               gve_rx_free_rings_gqi(priv);
+       else
+               gve_rx_free_rings_dqo(priv);
+}
+
 static void gve_free_rings(struct gve_priv *priv)
 {
        int ntfy_idx;
@@ -681,7 +825,7 @@ static int gve_alloc_qpls(struct gve_priv *priv)
        int err;
 
        /* Raw addressing means no QPLs */
-       if (priv->raw_addressing)
+       if (priv->queue_format == GVE_GQI_RDA_FORMAT)
                return 0;
 
        priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
@@ -725,7 +869,7 @@ static void gve_free_qpls(struct gve_priv *priv)
        int i;
 
        /* Raw addressing means no QPLs */
-       if (priv->raw_addressing)
+       if (priv->queue_format == GVE_GQI_RDA_FORMAT)
                return;
 
        kvfree(priv->qpl_cfg.qpl_id_map);
@@ -759,6 +903,7 @@ static int gve_open(struct net_device *dev)
        err = gve_alloc_qpls(priv);
        if (err)
                return err;
+
        err = gve_alloc_rings(priv);
        if (err)
                goto free_qpls;
@@ -773,9 +918,17 @@ static int gve_open(struct net_device *dev)
        err = gve_register_qpls(priv);
        if (err)
                goto reset;
+
+       if (!gve_is_gqi(priv)) {
+               /* Hard code this for now. This may be tuned in the future for
+                * performance.
+                */
+               priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO;
+       }
        err = gve_create_rings(priv);
        if (err)
                goto reset;
+
        gve_set_device_rings_ok(priv);
 
        if (gve_get_report_stats(priv))
@@ -924,14 +1077,26 @@ static void gve_turnup(struct gve_priv *priv)
                struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
 
                napi_enable(&block->napi);
-               iowrite32be(0, gve_irq_doorbell(priv, block));
+               if (gve_is_gqi(priv)) {
+                       iowrite32be(0, gve_irq_doorbell(priv, block));
+               } else {
+                       u32 val = gve_set_itr_ratelimit_dqo(GVE_TX_IRQ_RATELIMIT_US_DQO);
+
+                       gve_write_irq_doorbell_dqo(priv, block, val);
+               }
        }
        for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
                int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
                struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
 
                napi_enable(&block->napi);
-               iowrite32be(0, gve_irq_doorbell(priv, block));
+               if (gve_is_gqi(priv)) {
+                       iowrite32be(0, gve_irq_doorbell(priv, block));
+               } else {
+                       u32 val = gve_set_itr_ratelimit_dqo(GVE_RX_IRQ_RATELIMIT_US_DQO);
+
+                       gve_write_irq_doorbell_dqo(priv, block, val);
+               }
        }
 
        gve_set_napi_enabled(priv);
@@ -945,12 +1110,49 @@ static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
        priv->tx_timeo_cnt++;
 }
 
+static int gve_set_features(struct net_device *netdev,
+                           netdev_features_t features)
+{
+       const netdev_features_t orig_features = netdev->features;
+       struct gve_priv *priv = netdev_priv(netdev);
+       int err;
+
+       if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
+               netdev->features ^= NETIF_F_LRO;
+               if (netif_carrier_ok(netdev)) {
+                       /* To make this process as simple as possible we
+                        * teardown the device, set the new configuration,
+                        * and then bring the device up again.
+                        */
+                       err = gve_close(netdev);
+                       /* We have already tried to reset in close, just fail
+                        * at this point.
+                        */
+                       if (err)
+                               goto err;
+
+                       err = gve_open(netdev);
+                       if (err)
+                               goto err;
+               }
+       }
+
+       return 0;
+err:
+       /* Reverts the change on error. */
+       netdev->features = orig_features;
+       netif_err(priv, drv, netdev,
+                 "Set features failed! !!! DISABLING ALL QUEUES !!!\n");
+       return err;
+}
+
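In practice this callback fires when LRO is toggled from userspace, e.g.
"ethtool -K <dev> lro on"; since only the DQO path advertises NETIF_F_LRO in
this series, GQI devices never reach the close/reopen sequence above.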
 static const struct net_device_ops gve_netdev_ops = {
-       .ndo_start_xmit         =       gve_tx,
+       .ndo_start_xmit         =       gve_start_xmit,
        .ndo_open               =       gve_open,
        .ndo_stop               =       gve_close,
        .ndo_get_stats64        =       gve_get_stats,
        .ndo_tx_timeout         =       gve_tx_timeout,
+       .ndo_set_features       =       gve_set_features,
 };
 
 static void gve_handle_status(struct gve_priv *priv, u32 status)
@@ -994,6 +1196,15 @@ void gve_handle_report_stats(struct gve_priv *priv)
        /* tx stats */
        if (priv->tx) {
                for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
+                       u32 last_completion = 0;
+                       u32 tx_frames = 0;
+
+                       /* DQO doesn't currently support these metrics. */
+                       if (gve_is_gqi(priv)) {
+                               last_completion = priv->tx[idx].done;
+                               tx_frames = priv->tx[idx].req;
+                       }
+
                        do {
                                start = u64_stats_fetch_begin(&priv->tx[idx].statss);
                                tx_bytes = priv->tx[idx].bytes_done;
@@ -1010,7 +1221,7 @@ void gve_handle_report_stats(struct gve_priv *priv)
                        };
                        stats[stats_idx++] = (struct stats) {
                                .stat_name = cpu_to_be32(TX_FRAMES_SENT),
-                               .value = cpu_to_be64(priv->tx[idx].req),
+                               .value = cpu_to_be64(tx_frames),
                                .queue_id = cpu_to_be32(idx),
                        };
                        stats[stats_idx++] = (struct stats) {
@@ -1020,7 +1231,7 @@ void gve_handle_report_stats(struct gve_priv *priv)
                        };
                        stats[stats_idx++] = (struct stats) {
                                .stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
-                               .value = cpu_to_be64(priv->tx[idx].done),
+                               .value = cpu_to_be64(last_completion),
                                .queue_id = cpu_to_be32(idx),
                        };
                }
@@ -1088,7 +1299,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
        if (skip_describe_device)
                goto setup_device;
 
-       priv->raw_addressing = false;
+       priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED;
        /* Get the initial information we need from the device */
        err = gve_adminq_describe_device(priv);
        if (err) {
@@ -1096,7 +1307,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
                        "Could not get device information: err=%d\n", err);
                goto err;
        }
-       if (priv->dev->max_mtu > PAGE_SIZE) {
+       if (gve_is_gqi(priv) && priv->dev->max_mtu > PAGE_SIZE) {
                priv->dev->max_mtu = PAGE_SIZE;
                err = gve_adminq_set_mtu(priv, priv->dev->mtu);
                if (err) {
@@ -1307,7 +1518,12 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_set_drvdata(pdev, dev);
        dev->ethtool_ops = &gve_ethtool_ops;
        dev->netdev_ops = &gve_netdev_ops;
-       /* advertise features */
+
+       /* Set default and supported features.
+        *
+        * Features might be set in other locations as well (such as
+        * `gve_adminq_describe_device`).
+        */
        dev->hw_features = NETIF_F_HIGHDMA;
        dev->hw_features |= NETIF_F_SG;
        dev->hw_features |= NETIF_F_HW_CSUM;
@@ -1352,6 +1568,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto abort_with_wq;
 
        dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
+       dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
        gve_clear_probe_in_progress(priv);
        queue_work(priv->gve_wq, &priv->service_task);
        return 0;
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index bf123fe..bb82613 100644
@@ -1,21 +1,14 @@
 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
 /* Google virtual Ethernet (gve) driver
  *
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
  */
 
 #include "gve.h"
 #include "gve_adminq.h"
+#include "gve_utils.h"
 #include <linux/etherdevice.h>
 
-static void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
-{
-       struct gve_notify_block *block =
-                       &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];
-
-       block->rx = NULL;
-}
-
 static void gve_rx_free_buffer(struct device *dev,
                               struct gve_rx_slot_page_info *page_info,
                               union gve_rx_data_slot *data_slot)
@@ -137,16 +130,6 @@ alloc_err:
        return err;
 }
 
-static void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
-{
-       u32 ntfy_idx = gve_rx_idx_to_ntfy(priv, queue_idx);
-       struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
-       struct gve_rx_ring *rx = &priv->rx[queue_idx];
-
-       block->rx = rx;
-       rx->ntfy_id = ntfy_idx;
-}
-
 static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
 {
        struct gve_rx_ring *rx = &priv->rx[idx];
@@ -165,7 +148,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
 
        slots = priv->rx_data_slot_cnt;
        rx->mask = slots - 1;
-       rx->data.raw_addressing = priv->raw_addressing;
+       rx->data.raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;
 
        /* alloc rx data ring */
        bytes = sizeof(*rx->data.data_ring) * slots;
@@ -255,7 +238,7 @@ int gve_rx_alloc_rings(struct gve_priv *priv)
        return err;
 }
 
-void gve_rx_free_rings(struct gve_priv *priv)
+void gve_rx_free_rings_gqi(struct gve_priv *priv)
 {
        int i;
 
@@ -279,27 +262,6 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
        return PKT_HASH_TYPE_L2;
 }
 
-static struct sk_buff *gve_rx_copy(struct net_device *dev,
-                                  struct napi_struct *napi,
-                                  struct gve_rx_slot_page_info *page_info,
-                                  u16 len)
-{
-       struct sk_buff *skb = napi_alloc_skb(napi, len);
-       void *va = page_info->page_address + GVE_RX_PAD +
-                  (page_info->page_offset ? PAGE_SIZE / 2 : 0);
-
-       if (unlikely(!skb))
-               return NULL;
-
-       __skb_put(skb, len);
-
-       skb_copy_to_linear_data(skb, va, len);
-
-       skb->protocol = eth_type_trans(skb, dev);
-
-       return skb;
-}
-
 static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
                                        struct gve_rx_slot_page_info *page_info,
                                        u16 len)
@@ -310,7 +272,7 @@ static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
                return NULL;
 
        skb_add_rx_frag(skb, 0, page_info->page,
-                       (page_info->page_offset ? PAGE_SIZE / 2 : 0) +
+                       page_info->page_offset +
                        GVE_RX_PAD, len, PAGE_SIZE / 2);
 
        return skb;
@@ -321,7 +283,7 @@ static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *sl
        const __be64 offset = cpu_to_be64(PAGE_SIZE / 2);
 
        /* "flip" to other packet buffer on this page */
-       page_info->page_offset ^= 0x1;
+       page_info->page_offset ^= PAGE_SIZE / 2;
        *(slot_addr) ^= offset;
 }
 
@@ -388,7 +350,7 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
                        gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
                }
        } else {
-               skb = gve_rx_copy(netdev, napi, page_info, len);
+               skb = gve_rx_copy(netdev, napi, page_info, len, GVE_RX_PAD);
                if (skb) {
                        u64_stats_update_begin(&rx->statss);
                        rx->rx_copied_pkt++;
@@ -430,7 +392,7 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
 
        if (len <= priv->rx_copybreak) {
                /* Just copy small packets */
-               skb = gve_rx_copy(dev, napi, page_info, len);
+               skb = gve_rx_copy(dev, napi, page_info, len, GVE_RX_PAD);
                u64_stats_update_begin(&rx->statss);
                rx->rx_copied_pkt++;
                rx->rx_copybreak_pkt++;
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
new file mode 100644
index 0000000..8738db0
--- /dev/null
@@ -0,0 +1,763 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2021 Google, Inc.
+ */
+
+#include "gve.h"
+#include "gve_dqo.h"
+#include "gve_adminq.h"
+#include "gve_utils.h"
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/ip6_checksum.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+
+static int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs)
+{
+       return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias;
+}
+
+static void gve_free_page_dqo(struct gve_priv *priv,
+                             struct gve_rx_buf_state_dqo *bs)
+{
+       page_ref_sub(bs->page_info.page, bs->page_info.pagecnt_bias - 1);
+       gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr,
+                     DMA_FROM_DEVICE);
+       bs->page_info.page = NULL;
+}
+
+static struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
+{
+       struct gve_rx_buf_state_dqo *buf_state;
+       s16 buffer_id;
+
+       buffer_id = rx->dqo.free_buf_states;
+       if (unlikely(buffer_id == -1))
+               return NULL;
+
+       buf_state = &rx->dqo.buf_states[buffer_id];
+
+       /* Remove buf_state from free list */
+       rx->dqo.free_buf_states = buf_state->next;
+
+       /* Point buf_state to itself to mark it as allocated */
+       buf_state->next = buffer_id;
+
+       return buf_state;
+}
+
+static bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
+                                      struct gve_rx_buf_state_dqo *buf_state)
+{
+       s16 buffer_id = buf_state - rx->dqo.buf_states;
+
+       return buf_state->next == buffer_id;
+}
+
+static void gve_free_buf_state(struct gve_rx_ring *rx,
+                              struct gve_rx_buf_state_dqo *buf_state)
+{
+       s16 buffer_id = buf_state - rx->dqo.buf_states;
+
+       buf_state->next = rx->dqo.free_buf_states;
+       rx->dqo.free_buf_states = buffer_id;
+}
+
+static struct gve_rx_buf_state_dqo *
+gve_dequeue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list)
+{
+       struct gve_rx_buf_state_dqo *buf_state;
+       s16 buffer_id;
+
+       buffer_id = list->head;
+       if (unlikely(buffer_id == -1))
+               return NULL;
+
+       buf_state = &rx->dqo.buf_states[buffer_id];
+
+       /* Remove buf_state from list */
+       list->head = buf_state->next;
+       if (buf_state->next == -1)
+               list->tail = -1;
+
+       /* Point buf_state to itself to mark it as allocated */
+       buf_state->next = buffer_id;
+
+       return buf_state;
+}
+
+static void gve_enqueue_buf_state(struct gve_rx_ring *rx,
+                                 struct gve_index_list *list,
+                                 struct gve_rx_buf_state_dqo *buf_state)
+{
+       s16 buffer_id = buf_state - rx->dqo.buf_states;
+
+       buf_state->next = -1;
+
+       if (list->head == -1) {
+               list->head = buffer_id;
+               list->tail = buffer_id;
+       } else {
+               int tail = list->tail;
+
+               rx->dqo.buf_states[tail].next = buffer_id;
+               list->tail = buffer_id;
+       }
+}
+
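These helpers thread three lists through one kvcalloc'd array using s16
indices instead of pointers, so -1 doubles as the NULL sentinel and a
self-link marks allocation (which is exactly what
gve_buf_state_is_allocated() tests). Summarized as a comment, with no new
behavior implied:

/* buf_state->next encodes which list a state is on:
 *   free list:             next = index of the next free state, -1 ends it
 *   allocated (in flight): next = the state's own index (self-link)
 *   recycled/used FIFOs:   linked head -> tail, with -1 meaning empty
 */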
+static struct gve_rx_buf_state_dqo *
+gve_get_recycled_buf_state(struct gve_rx_ring *rx)
+{
+       struct gve_rx_buf_state_dqo *buf_state;
+       int i;
+
+       /* Recycled buf states are immediately usable. */
+       buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states);
+       if (likely(buf_state))
+               return buf_state;
+
+       if (unlikely(rx->dqo.used_buf_states.head == -1))
+               return NULL;
+
+       /* Used buf states are only usable when ref count reaches 0, which means
+        * no SKBs refer to them.
+        *
+        * Search a limited number before giving up.
+        */
+       for (i = 0; i < 5; i++) {
+               buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
+               if (gve_buf_ref_cnt(buf_state) == 0)
+                       return buf_state;
+
+               gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
+       }
+
+       /* If there are no free buf states, discard an entry from
+        * `used_buf_states` so it can be used.
+        */
+       if (unlikely(rx->dqo.free_buf_states == -1)) {
+               buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
+               if (gve_buf_ref_cnt(buf_state) == 0)
+                       return buf_state;
+
+               gve_free_page_dqo(rx->gve, buf_state);
+               gve_free_buf_state(rx, buf_state);
+       }
+
+       return NULL;
+}
+
+static int gve_alloc_page_dqo(struct gve_priv *priv,
+                             struct gve_rx_buf_state_dqo *buf_state)
+{
+       int err;
+
+       err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
+                            &buf_state->addr, DMA_FROM_DEVICE);
+       if (err)
+               return err;
+
+       buf_state->page_info.page_offset = 0;
+       buf_state->page_info.page_address =
+               page_address(buf_state->page_info.page);
+       buf_state->last_single_ref_offset = 0;
+
+       /* The page already has 1 ref. */
+       page_ref_add(buf_state->page_info.page, INT_MAX - 1);
+       buf_state->page_info.pagecnt_bias = INT_MAX;
+
+       return 0;
+}
+
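The INT_MAX bias is what keeps gve_buf_ref_cnt() cheap: right after this
function runs, page_count() == INT_MAX and pagecnt_bias == INT_MAX, so the
computed reference count is 0 (no SKB holds the buffer), and each normal page
reference taken when a fragment is attached raises it by exactly one; that
zero test is what gve_get_recycled_buf_state() above relies on. The teardown
in gve_free_page_dqo() balances this by dropping pagecnt_bias - 1 references,
leaving only the single reference that gve_free_page() releases.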
+static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx)
+{
+       struct gve_rx_ring *rx = &priv->rx[idx];
+       struct device *hdev = &priv->pdev->dev;
+       size_t completion_queue_slots;
+       size_t buffer_queue_slots;
+       size_t size;
+       int i;
+
+       completion_queue_slots = rx->dqo.complq.mask + 1;
+       buffer_queue_slots = rx->dqo.bufq.mask + 1;
+
+       gve_rx_remove_from_block(priv, idx);
+
+       if (rx->q_resources) {
+               dma_free_coherent(hdev, sizeof(*rx->q_resources),
+                                 rx->q_resources, rx->q_resources_bus);
+               rx->q_resources = NULL;
+       }
+
+       for (i = 0; i < rx->dqo.num_buf_states; i++) {
+               struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];
+
+               if (bs->page_info.page)
+                       gve_free_page_dqo(priv, bs);
+       }
+
+       if (rx->dqo.bufq.desc_ring) {
+               size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
+               dma_free_coherent(hdev, size, rx->dqo.bufq.desc_ring,
+                                 rx->dqo.bufq.bus);
+               rx->dqo.bufq.desc_ring = NULL;
+       }
+
+       if (rx->dqo.complq.desc_ring) {
+               size = sizeof(rx->dqo.complq.desc_ring[0]) *
+                       completion_queue_slots;
+               dma_free_coherent(hdev, size, rx->dqo.complq.desc_ring,
+                                 rx->dqo.complq.bus);
+               rx->dqo.complq.desc_ring = NULL;
+       }
+
+       kvfree(rx->dqo.buf_states);
+       rx->dqo.buf_states = NULL;
+
+       netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
+}
+
+static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
+{
+       struct gve_rx_ring *rx = &priv->rx[idx];
+       struct device *hdev = &priv->pdev->dev;
+       size_t size;
+       int i;
+
+       const u32 buffer_queue_slots =
+               priv->options_dqo_rda.rx_buff_ring_entries;
+       const u32 completion_queue_slots = priv->rx_desc_cnt;
+
+       netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");
+
+       memset(rx, 0, sizeof(*rx));
+       rx->gve = priv;
+       rx->q_num = idx;
+       rx->dqo.bufq.mask = buffer_queue_slots - 1;
+       rx->dqo.complq.num_free_slots = completion_queue_slots;
+       rx->dqo.complq.mask = completion_queue_slots - 1;
+       rx->skb_head = NULL;
+       rx->skb_tail = NULL;
+
+       rx->dqo.num_buf_states = min_t(s16, S16_MAX, buffer_queue_slots * 4);
+       rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
+                                     sizeof(rx->dqo.buf_states[0]),
+                                     GFP_KERNEL);
+       if (!rx->dqo.buf_states)
+               return -ENOMEM;
+
+       /* Set up linked list of buffer IDs */
+       for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
+               rx->dqo.buf_states[i].next = i + 1;
+
+       rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
+       rx->dqo.recycled_buf_states.head = -1;
+       rx->dqo.recycled_buf_states.tail = -1;
+       rx->dqo.used_buf_states.head = -1;
+       rx->dqo.used_buf_states.tail = -1;
+
+       /* Allocate RX completion queue */
+       size = sizeof(rx->dqo.complq.desc_ring[0]) *
+               completion_queue_slots;
+       rx->dqo.complq.desc_ring =
+               dma_alloc_coherent(hdev, size, &rx->dqo.complq.bus, GFP_KERNEL);
+       if (!rx->dqo.complq.desc_ring)
+               goto err;
+
+       /* Allocate RX buffer queue */
+       size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
+       rx->dqo.bufq.desc_ring =
+               dma_alloc_coherent(hdev, size, &rx->dqo.bufq.bus, GFP_KERNEL);
+       if (!rx->dqo.bufq.desc_ring)
+               goto err;
+
+       rx->q_resources = dma_alloc_coherent(hdev, sizeof(*rx->q_resources),
+                                            &rx->q_resources_bus, GFP_KERNEL);
+       if (!rx->q_resources)
+               goto err;
+
+       gve_rx_add_to_block(priv, idx);
+
+       return 0;
+
+err:
+       gve_rx_free_ring_dqo(priv, idx);
+       return -ENOMEM;
+}
+
+void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx)
+{
+       const struct gve_rx_ring *rx = &priv->rx[queue_idx];
+       u64 index = be32_to_cpu(rx->q_resources->db_index);
+
+       iowrite32(rx->dqo.bufq.tail, &priv->db_bar2[index]);
+}
+
+int gve_rx_alloc_rings_dqo(struct gve_priv *priv)
+{
+       int err = 0;
+       int i;
+
+       for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+               err = gve_rx_alloc_ring_dqo(priv, i);
+               if (err) {
+                       netif_err(priv, drv, priv->dev,
+                                 "Failed to alloc rx ring=%d: err=%d\n",
+                                 i, err);
+                       goto err;
+               }
+       }
+
+       return 0;
+
+err:
+       for (i--; i >= 0; i--)
+               gve_rx_free_ring_dqo(priv, i);
+
+       return err;
+}
+
+void gve_rx_free_rings_dqo(struct gve_priv *priv)
+{
+       int i;
+
+       for (i = 0; i < priv->rx_cfg.num_queues; i++)
+               gve_rx_free_ring_dqo(priv, i);
+}
+
+void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
+{
+       struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;
+       struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq;
+       struct gve_priv *priv = rx->gve;
+       u32 num_avail_slots;
+       u32 num_full_slots;
+       u32 num_posted = 0;
+
+       num_full_slots = (bufq->tail - bufq->head) & bufq->mask;
+       num_avail_slots = bufq->mask - num_full_slots;
+
+       num_avail_slots = min_t(u32, num_avail_slots, complq->num_free_slots);
+       while (num_posted < num_avail_slots) {
+               struct gve_rx_desc_dqo *desc = &bufq->desc_ring[bufq->tail];
+               struct gve_rx_buf_state_dqo *buf_state;
+
+               buf_state = gve_get_recycled_buf_state(rx);
+               if (unlikely(!buf_state)) {
+                       buf_state = gve_alloc_buf_state(rx);
+                       if (unlikely(!buf_state))
+                               break;
+
+                       if (unlikely(gve_alloc_page_dqo(priv, buf_state))) {
+                               u64_stats_update_begin(&rx->statss);
+                               rx->rx_buf_alloc_fail++;
+                               u64_stats_update_end(&rx->statss);
+                               gve_free_buf_state(rx, buf_state);
+                               break;
+                       }
+               }
+
+               desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
+               desc->buf_addr = cpu_to_le64(buf_state->addr +
+                                            buf_state->page_info.page_offset);
+
+               bufq->tail = (bufq->tail + 1) & bufq->mask;
+               complq->num_free_slots--;
+               num_posted++;
+
+               if ((bufq->tail & (GVE_RX_BUF_THRESH_DQO - 1)) == 0)
+                       gve_rx_write_doorbell_dqo(priv, rx->q_num);
+       }
+
+       rx->fill_cnt += num_posted;
+}
+
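+/* Because the queue sizes are powers of two, (tail - head) & mask above
+ * yields the in-flight count even across wraparound: e.g. with mask = 255,
+ * tail = 3 and head = 250 give (3 - 250) & 255 = 9 posted slots. Using
+ * "mask" rather than "mask + 1" for the available count deliberately leaves
+ * one slot unused so a full ring is distinguishable from an empty one.
+ */
+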
+static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
+                               struct gve_rx_buf_state_dqo *buf_state)
+{
+       const int data_buffer_size = priv->data_buffer_size_dqo;
+       int pagecount;
+
+       /* Can't reuse if we only fit one buffer per page */
+       if (data_buffer_size * 2 > PAGE_SIZE)
+               goto mark_used;
+
+       pagecount = gve_buf_ref_cnt(buf_state);
+
+       /* Record the offset when we have a single remaining reference.
+        *
+        * When this happens, we know all of the other offsets of the page are
+        * usable.
+        */
+       if (pagecount == 1) {
+               buf_state->last_single_ref_offset =
+                       buf_state->page_info.page_offset;
+       }
+
+       /* Use the next buffer sized chunk in the page. */
+       buf_state->page_info.page_offset += data_buffer_size;
+       buf_state->page_info.page_offset &= (PAGE_SIZE - 1);
+
+       /* If we wrap around to the same offset without ever dropping to 1
+        * reference, then we don't know if this offset was ever freed.
+        */
+       if (buf_state->page_info.page_offset ==
+           buf_state->last_single_ref_offset) {
+               goto mark_used;
+       }
+
+       gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
+       return;
+
+mark_used:
+       gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
+}
+
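+/* Worked example with PAGE_SIZE = 4096 and 2048-byte buffers: the offset
+ * cycles 0 -> 2048 -> 0. If the page's reference count drops to 1 while at
+ * offset 0, the other half is known to be free and advancing is safe; if
+ * the offset wraps all the way around to last_single_ref_offset without the
+ * count ever hitting 1, some SKB may still own one of the chunks, so the
+ * buffer is parked on used_buf_states until its references drain.
+ */
+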
+static void gve_rx_skb_csum(struct sk_buff *skb,
+                           const struct gve_rx_compl_desc_dqo *desc,
+                           struct gve_ptype ptype)
+{
+       skb->ip_summed = CHECKSUM_NONE;
+
+       /* HW did not identify and process L3 and L4 headers. */
+       if (unlikely(!desc->l3_l4_processed))
+               return;
+
+       if (ptype.l3_type == GVE_L3_TYPE_IPV4) {
+               if (unlikely(desc->csum_ip_err || desc->csum_external_ip_err))
+                       return;
+       } else if (ptype.l3_type == GVE_L3_TYPE_IPV6) {
+               /* Checksum should be skipped if this flag is set. */
+               if (unlikely(desc->ipv6_ex_add))
+                       return;
+       }
+
+       if (unlikely(desc->csum_l4_err))
+               return;
+
+       switch (ptype.l4_type) {
+       case GVE_L4_TYPE_TCP:
+       case GVE_L4_TYPE_UDP:
+       case GVE_L4_TYPE_ICMP:
+       case GVE_L4_TYPE_SCTP:
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+               break;
+       default:
+               break;
+       }
+}
+
+static void gve_rx_skb_hash(struct sk_buff *skb,
+                           const struct gve_rx_compl_desc_dqo *compl_desc,
+                           struct gve_ptype ptype)
+{
+       enum pkt_hash_types hash_type = PKT_HASH_TYPE_L2;
+
+       if (ptype.l4_type != GVE_L4_TYPE_UNKNOWN)
+               hash_type = PKT_HASH_TYPE_L4;
+       else if (ptype.l3_type != GVE_L3_TYPE_UNKNOWN)
+               hash_type = PKT_HASH_TYPE_L3;
+
+       skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
+}
+
+static void gve_rx_free_skb(struct gve_rx_ring *rx)
+{
+       if (!rx->skb_head)
+               return;
+
+       dev_kfree_skb_any(rx->skb_head);
+       rx->skb_head = NULL;
+       rx->skb_tail = NULL;
+}
+
+/* Chains multiple skbs for a single rx packet.
+ * Returns 0 if the buffer is appended, -1 otherwise.
+ */
+static int gve_rx_append_frags(struct napi_struct *napi,
+                              struct gve_rx_buf_state_dqo *buf_state,
+                              u16 buf_len, struct gve_rx_ring *rx,
+                              struct gve_priv *priv)
+{
+       int num_frags = skb_shinfo(rx->skb_tail)->nr_frags;
+
+       if (unlikely(num_frags == MAX_SKB_FRAGS)) {
+               struct sk_buff *skb;
+
+               skb = napi_alloc_skb(napi, 0);
+               if (!skb)
+                       return -1;
+
+               skb_shinfo(rx->skb_tail)->frag_list = skb;
+               rx->skb_tail = skb;
+               num_frags = 0;
+       }
+       if (rx->skb_tail != rx->skb_head) {
+               rx->skb_head->len += buf_len;
+               rx->skb_head->data_len += buf_len;
+               rx->skb_head->truesize += priv->data_buffer_size_dqo;
+       }
+
+       skb_add_rx_frag(rx->skb_tail, num_frags,
+                       buf_state->page_info.page,
+                       buf_state->page_info.page_offset,
+                       buf_len, priv->data_buffer_size_dqo);
+       gve_dec_pagecnt_bias(&buf_state->page_info);
+
+       return 0;
+}
+
+/* Returns 0 if descriptor is completed successfully.
+ * Returns -EINVAL if descriptor is invalid.
+ * Returns -ENOMEM if data cannot be copied to skb.
+ */
+static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
+                     const struct gve_rx_compl_desc_dqo *compl_desc,
+                     int queue_idx)
+{
+       const u16 buffer_id = le16_to_cpu(compl_desc->buf_id);
+       const bool eop = compl_desc->end_of_packet != 0;
+       struct gve_rx_buf_state_dqo *buf_state;
+       struct gve_priv *priv = rx->gve;
+       u16 buf_len;
+
+       if (unlikely(buffer_id >= rx->dqo.num_buf_states)) {
+               net_err_ratelimited("%s: Invalid RX buffer_id=%u\n",
+                                   priv->dev->name, buffer_id);
+               return -EINVAL;
+       }
+       buf_state = &rx->dqo.buf_states[buffer_id];
+       if (unlikely(!gve_buf_state_is_allocated(rx, buf_state))) {
+               net_err_ratelimited("%s: RX buffer_id is not allocated: %u\n",
+                                   priv->dev->name, buffer_id);
+               return -EINVAL;
+       }
+
+       if (unlikely(compl_desc->rx_error)) {
+               gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
+                                     buf_state);
+               return -EINVAL;
+       }
+
+       buf_len = compl_desc->packet_len;
+
+       /* The page might not have been used for a while and was likely last
+        * written by a different thread.
+        */
+       prefetch(buf_state->page_info.page);
+
+       /* Sync the portion of the DMA buffer the CPU is about to read. */
+       dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
+                                     buf_state->page_info.page_offset,
+                                     buf_len, DMA_FROM_DEVICE);
+
+       /* Append to current skb if one exists. */
+       if (rx->skb_head) {
+               if (unlikely(gve_rx_append_frags(napi, buf_state, buf_len, rx,
+                                                priv) != 0)) {
+                       goto error;
+               }
+
+               gve_try_recycle_buf(priv, rx, buf_state);
+               return 0;
+       }
+
+       /* Prefetch the payload header. */
+       prefetch((char *)buf_state->addr + buf_state->page_info.page_offset);
+#if L1_CACHE_BYTES < 128
+       prefetch((char *)buf_state->addr + buf_state->page_info.page_offset +
+                L1_CACHE_BYTES);
+#endif
+
+       if (eop && buf_len <= priv->rx_copybreak) {
+               rx->skb_head = gve_rx_copy(priv->dev, napi,
+                                          &buf_state->page_info, buf_len, 0);
+               if (unlikely(!rx->skb_head))
+                       goto error;
+               rx->skb_tail = rx->skb_head;
+
+               u64_stats_update_begin(&rx->statss);
+               rx->rx_copied_pkt++;
+               rx->rx_copybreak_pkt++;
+               u64_stats_update_end(&rx->statss);
+
+               gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
+                                     buf_state);
+               return 0;
+       }
+
+       rx->skb_head = napi_get_frags(napi);
+       if (unlikely(!rx->skb_head))
+               goto error;
+       rx->skb_tail = rx->skb_head;
+
+       skb_add_rx_frag(rx->skb_head, 0, buf_state->page_info.page,
+                       buf_state->page_info.page_offset, buf_len,
+                       priv->data_buffer_size_dqo);
+       gve_dec_pagecnt_bias(&buf_state->page_info);
+
+       gve_try_recycle_buf(priv, rx, buf_state);
+       return 0;
+
+error:
+       gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
+       return -ENOMEM;
+}
+
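+/* rx_copybreak above is the standard small-packet optimization: payloads at
+ * or below the threshold are copied into a fresh skb so the backing page
+ * can be recycled immediately, while larger payloads are attached zero-copy
+ * as page frags and the page is only recycled once gve_try_recycle_buf()
+ * finds it reusable.
+ */
+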
+static int gve_rx_complete_rsc(struct sk_buff *skb,
+                              const struct gve_rx_compl_desc_dqo *desc,
+                              struct gve_ptype ptype)
+{
+       struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+       /* Only TCP is supported right now. */
+       if (ptype.l4_type != GVE_L4_TYPE_TCP)
+               return -EINVAL;
+
+       switch (ptype.l3_type) {
+       case GVE_L3_TYPE_IPV4:
+               shinfo->gso_type = SKB_GSO_TCPV4;
+               break;
+       case GVE_L3_TYPE_IPV6:
+               shinfo->gso_type = SKB_GSO_TCPV6;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       shinfo->gso_size = le16_to_cpu(desc->rsc_seg_len);
+       return 0;
+}
+
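+/* An RSC (receive-side coalescing) skb is handed up looking like one the
+ * stack GRO'd itself: gso_type records the TCP flavor and gso_size the
+ * original on-wire segment length, so later re-segmentation or forwarding
+ * does not trip over the coalesced packet exceeding the MTU.
+ */
+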
+/* Returns 0 if skb is completed successfully, -1 otherwise. */
+static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
+                              const struct gve_rx_compl_desc_dqo *desc,
+                              netdev_features_t feat)
+{
+       struct gve_ptype ptype =
+               rx->gve->ptype_lut_dqo->ptypes[desc->packet_type];
+       int err;
+
+       skb_record_rx_queue(rx->skb_head, rx->q_num);
+
+       if (feat & NETIF_F_RXHASH)
+               gve_rx_skb_hash(rx->skb_head, desc, ptype);
+
+       if (feat & NETIF_F_RXCSUM)
+               gve_rx_skb_csum(rx->skb_head, desc, ptype);
+
+       /* RSC packets must set gso_size, otherwise the TCP stack will
+        * complain that packets are larger than the MTU.
+        */
+       if (desc->rsc) {
+               err = gve_rx_complete_rsc(rx->skb_head, desc, ptype);
+               if (err < 0)
+                       return err;
+       }
+
+       if (skb_headlen(rx->skb_head) == 0)
+               napi_gro_frags(napi);
+       else
+               napi_gro_receive(napi, rx->skb_head);
+
+       return 0;
+}
+
+int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
+{
+       struct napi_struct *napi = &block->napi;
+       netdev_features_t feat = napi->dev->features;
+
+       struct gve_rx_ring *rx = block->rx;
+       struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;
+
+       u32 work_done = 0;
+       u64 bytes = 0;
+       int err;
+
+       while (work_done < budget) {
+               struct gve_rx_compl_desc_dqo *compl_desc =
+                       &complq->desc_ring[complq->head];
+               u32 pkt_bytes;
+
+               /* No more new packets */
+               if (compl_desc->generation == complq->cur_gen_bit)
+                       break;
+
+               /* Prefetch the next two descriptors. */
+               prefetch(&complq->desc_ring[(complq->head + 1) & complq->mask]);
+               prefetch(&complq->desc_ring[(complq->head + 2) & complq->mask]);
+
+               /* Do not read data until we own the descriptor */
+               dma_rmb();
+
+               err = gve_rx_dqo(napi, rx, compl_desc, rx->q_num);
+               if (err < 0) {
+                       gve_rx_free_skb(rx);
+                       u64_stats_update_begin(&rx->statss);
+                       if (err == -ENOMEM)
+                               rx->rx_skb_alloc_fail++;
+                       else if (err == -EINVAL)
+                               rx->rx_desc_err_dropped_pkt++;
+                       u64_stats_update_end(&rx->statss);
+               }
+
+               complq->head = (complq->head + 1) & complq->mask;
+               complq->num_free_slots++;
+
+               /* When the ring wraps, the generation bit is flipped. */
+               complq->cur_gen_bit ^= (complq->head == 0);
+
+               /* Receiving a completion means we have space to post another
+                * buffer on the buffer queue.
+                */
+               {
+                       struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq;
+
+                       bufq->head = (bufq->head + 1) & bufq->mask;
+               }
+
+               /* Free running counter of completed descriptors */
+               rx->cnt++;
+
+               if (!rx->skb_head)
+                       continue;
+
+               if (!compl_desc->end_of_packet)
+                       continue;
+
+               work_done++;
+               pkt_bytes = rx->skb_head->len;
+               /* The ethernet header (first ETH_HLEN bytes) is snipped off
+                * by eth_type_trans.
+                */
+               if (skb_headlen(rx->skb_head))
+                       pkt_bytes += ETH_HLEN;
+
+               /* gve_rx_complete_skb() will consume skb if successful */
+               if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) {
+                       gve_rx_free_skb(rx);
+                       u64_stats_update_begin(&rx->statss);
+                       rx->rx_desc_err_dropped_pkt++;
+                       u64_stats_update_end(&rx->statss);
+                       continue;
+               }
+
+               bytes += pkt_bytes;
+               rx->skb_head = NULL;
+               rx->skb_tail = NULL;
+       }
+
+       gve_rx_post_buffers_dqo(rx);
+
+       u64_stats_update_begin(&rx->statss);
+       rx->rpackets += work_done;
+       rx->rbytes += bytes;
+       u64_stats_update_end(&rx->statss);
+
+       return work_done;
+}
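+
+/* The generation bit above replaces an explicit producer index: HW flips
+ * the bit it writes each time it wraps the completion ring, and the driver
+ * flips its expected value at the same point (cur_gen_bit ^= (head == 0)).
+ * A descriptor whose bit still matches cur_gen_bit is left over from the
+ * previous pass and therefore not yet valid, which is why the loop breaks
+ * on equality before issuing the dma_rmb() and reading the payload.
+ */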
index 3e04a39..665ac79 100644 (file)
@@ -1,11 +1,12 @@
 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
 /* Google virtual Ethernet (gve) driver
  *
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
  */
 
 #include "gve.h"
 #include "gve_adminq.h"
+#include "gve_utils.h"
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/vmalloc.h>
@@ -131,14 +132,6 @@ static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes)
        atomic_add(bytes, &fifo->available);
 }
 
-static void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
-{
-       struct gve_notify_block *block =
-                       &priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];
-
-       block->tx = NULL;
-}
-
 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
                             u32 to_do, bool try_to_wake);
 
@@ -174,16 +167,6 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx)
        netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
 }
 
-static void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
-{
-       int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
-       struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
-       struct gve_tx_ring *tx = &priv->tx[queue_idx];
-
-       block->tx = tx;
-       tx->ntfy_id = ntfy_idx;
-}
-
 static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
 {
        struct gve_tx_ring *tx = &priv->tx[idx];
@@ -208,7 +191,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
        if (!tx->desc)
                goto abort_with_info;
 
-       tx->raw_addressing = priv->raw_addressing;
+       tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;
        tx->dev = &priv->pdev->dev;
        if (!tx->raw_addressing) {
                tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
@@ -273,7 +256,7 @@ int gve_tx_alloc_rings(struct gve_priv *priv)
        return err;
 }
 
-void gve_tx_free_rings(struct gve_priv *priv)
+void gve_tx_free_rings_gqi(struct gve_priv *priv)
 {
        int i;
 
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
new file mode 100644 (file)
index 0000000..05ddb6a
--- /dev/null
@@ -0,0 +1,1030 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2021 Google, Inc.
+ */
+
+#include "gve.h"
+#include "gve_adminq.h"
+#include "gve_utils.h"
+#include "gve_dqo.h"
+#include <linux/tcp.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+
+/* Returns true if a gve_tx_pending_packet_dqo object is available. */
+static bool gve_has_pending_packet(struct gve_tx_ring *tx)
+{
+       /* Check TX path's list. */
+       if (tx->dqo_tx.free_pending_packets != -1)
+               return true;
+
+       /* Check completion handler's list. */
+       if (atomic_read_acquire(&tx->dqo_compl.free_pending_packets) != -1)
+               return true;
+
+       return false;
+}
+
+static struct gve_tx_pending_packet_dqo *
+gve_alloc_pending_packet(struct gve_tx_ring *tx)
+{
+       struct gve_tx_pending_packet_dqo *pending_packet;
+       s16 index;
+
+       index = tx->dqo_tx.free_pending_packets;
+
+       /* No pending_packets available, try to steal the list from the
+        * completion handler.
+        */
+       if (unlikely(index == -1)) {
+               tx->dqo_tx.free_pending_packets =
+                       atomic_xchg(&tx->dqo_compl.free_pending_packets, -1);
+               index = tx->dqo_tx.free_pending_packets;
+
+               if (unlikely(index == -1))
+                       return NULL;
+       }
+
+       pending_packet = &tx->dqo.pending_packets[index];
+
+       /* Remove pending_packet from free list */
+       tx->dqo_tx.free_pending_packets = pending_packet->next;
+       pending_packet->state = GVE_PACKET_STATE_PENDING_DATA_COMPL;
+
+       return pending_packet;
+}
+
+static void
+gve_free_pending_packet(struct gve_tx_ring *tx,
+                       struct gve_tx_pending_packet_dqo *pending_packet)
+{
+       s16 index = pending_packet - tx->dqo.pending_packets;
+
+       pending_packet->state = GVE_PACKET_STATE_UNALLOCATED;
+       while (true) {
+               s16 old_head = atomic_read_acquire(&tx->dqo_compl.free_pending_packets);
+
+               pending_packet->next = old_head;
+               if (atomic_cmpxchg(&tx->dqo_compl.free_pending_packets,
+                                  old_head, index) == old_head) {
+                       break;
+               }
+       }
+}
+
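+/* gve_alloc_pending_packet() and gve_free_pending_packet() together form a
+ * lock-free free list between the TX path and the completion path: free is
+ * a classic Treiber-stack push (a cmpxchg loop on the head), while the
+ * consumer never pops single nodes -- it steals the whole list in one
+ * atomic_xchg() and drains it privately, which avoids the ABA hazard a
+ * lock-free single-node pop would otherwise need version counters to solve.
+ */
+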
+/* gve_tx_clean_pending_packets - Cleans up all pending TX requests and
+ * buffers.
+ */
+static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
+{
+       int i;
+
+       for (i = 0; i < tx->dqo.num_pending_packets; i++) {
+               struct gve_tx_pending_packet_dqo *cur_state =
+                       &tx->dqo.pending_packets[i];
+               int j;
+
+               for (j = 0; j < cur_state->num_bufs; j++) {
+                       struct gve_tx_dma_buf *buf = &cur_state->bufs[j];
+
+                       if (j == 0) {
+                               dma_unmap_single(tx->dev,
+                                                dma_unmap_addr(buf, dma),
+                                                dma_unmap_len(buf, len),
+                                                DMA_TO_DEVICE);
+                       } else {
+                               dma_unmap_page(tx->dev,
+                                              dma_unmap_addr(buf, dma),
+                                              dma_unmap_len(buf, len),
+                                              DMA_TO_DEVICE);
+                       }
+               }
+               if (cur_state->skb) {
+                       dev_consume_skb_any(cur_state->skb);
+                       cur_state->skb = NULL;
+               }
+       }
+}
+
+static void gve_tx_free_ring_dqo(struct gve_priv *priv, int idx)
+{
+       struct gve_tx_ring *tx = &priv->tx[idx];
+       struct device *hdev = &priv->pdev->dev;
+       size_t bytes;
+
+       gve_tx_remove_from_block(priv, idx);
+
+       if (tx->q_resources) {
+               dma_free_coherent(hdev, sizeof(*tx->q_resources),
+                                 tx->q_resources, tx->q_resources_bus);
+               tx->q_resources = NULL;
+       }
+
+       if (tx->dqo.compl_ring) {
+               bytes = sizeof(tx->dqo.compl_ring[0]) *
+                       (tx->dqo.complq_mask + 1);
+               dma_free_coherent(hdev, bytes, tx->dqo.compl_ring,
+                                 tx->complq_bus_dqo);
+               tx->dqo.compl_ring = NULL;
+       }
+
+       if (tx->dqo.tx_ring) {
+               bytes = sizeof(tx->dqo.tx_ring[0]) * (tx->mask + 1);
+               dma_free_coherent(hdev, bytes, tx->dqo.tx_ring, tx->bus);
+               tx->dqo.tx_ring = NULL;
+       }
+
+       kvfree(tx->dqo.pending_packets);
+       tx->dqo.pending_packets = NULL;
+
+       netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
+}
+
+static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
+{
+       struct gve_tx_ring *tx = &priv->tx[idx];
+       struct device *hdev = &priv->pdev->dev;
+       int num_pending_packets;
+       size_t bytes;
+       int i;
+
+       memset(tx, 0, sizeof(*tx));
+       tx->q_num = idx;
+       tx->dev = &priv->pdev->dev;
+       tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+       atomic_set_release(&tx->dqo_compl.hw_tx_head, 0);
+
+       /* Queue sizes must be a power of 2 */
+       tx->mask = priv->tx_desc_cnt - 1;
+       tx->dqo.complq_mask = priv->options_dqo_rda.tx_comp_ring_entries - 1;
+
+       /* The max number of pending packets determines the maximum number of
+        * descriptors which may be written to the completion queue.
+        *
+        * We must set the number small enough to make sure we never overrun
+        * the completion queue.
+        */
+       num_pending_packets = tx->dqo.complq_mask + 1;
+
+       /* Reserve space for descriptor completions, which will be reported at
+        * most every GVE_TX_MIN_RE_INTERVAL packets.
+        */
+       num_pending_packets -=
+               (tx->dqo.complq_mask + 1) / GVE_TX_MIN_RE_INTERVAL;
+
+       /* Each packet may have at most 2 buffer completions if it receives both
+        * a miss and reinjection completion.
+        */
+       num_pending_packets /= 2;
+
+       tx->dqo.num_pending_packets = min_t(int, num_pending_packets, S16_MAX);
+       tx->dqo.pending_packets = kvcalloc(tx->dqo.num_pending_packets,
+                                          sizeof(tx->dqo.pending_packets[0]),
+                                          GFP_KERNEL);
+       if (!tx->dqo.pending_packets)
+               goto err;
+
+       /* Set up linked list of pending packets */
+       for (i = 0; i < tx->dqo.num_pending_packets - 1; i++)
+               tx->dqo.pending_packets[i].next = i + 1;
+
+       tx->dqo.pending_packets[tx->dqo.num_pending_packets - 1].next = -1;
+       atomic_set_release(&tx->dqo_compl.free_pending_packets, -1);
+       tx->dqo_compl.miss_completions.head = -1;
+       tx->dqo_compl.miss_completions.tail = -1;
+       tx->dqo_compl.timed_out_completions.head = -1;
+       tx->dqo_compl.timed_out_completions.tail = -1;
+
+       bytes = sizeof(tx->dqo.tx_ring[0]) * (tx->mask + 1);
+       tx->dqo.tx_ring = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
+       if (!tx->dqo.tx_ring)
+               goto err;
+
+       bytes = sizeof(tx->dqo.compl_ring[0]) * (tx->dqo.complq_mask + 1);
+       tx->dqo.compl_ring = dma_alloc_coherent(hdev, bytes,
+                                               &tx->complq_bus_dqo,
+                                               GFP_KERNEL);
+       if (!tx->dqo.compl_ring)
+               goto err;
+
+       tx->q_resources = dma_alloc_coherent(hdev, sizeof(*tx->q_resources),
+                                            &tx->q_resources_bus, GFP_KERNEL);
+       if (!tx->q_resources)
+               goto err;
+
+       gve_tx_add_to_block(priv, idx);
+
+       return 0;
+
+err:
+       gve_tx_free_ring_dqo(priv, idx);
+       return -ENOMEM;
+}
+
+int gve_tx_alloc_rings_dqo(struct gve_priv *priv)
+{
+       int err = 0;
+       int i;
+
+       for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+               err = gve_tx_alloc_ring_dqo(priv, i);
+               if (err) {
+                       netif_err(priv, drv, priv->dev,
+                                 "Failed to alloc tx ring=%d: err=%d\n",
+                                 i, err);
+                       goto err;
+               }
+       }
+
+       return 0;
+
+err:
+       for (i--; i >= 0; i--)
+               gve_tx_free_ring_dqo(priv, i);
+
+       return err;
+}
+
+void gve_tx_free_rings_dqo(struct gve_priv *priv)
+{
+       int i;
+
+       for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+               struct gve_tx_ring *tx = &priv->tx[i];
+
+               gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL);
+               netdev_tx_reset_queue(tx->netdev_txq);
+               gve_tx_clean_pending_packets(tx);
+
+               gve_tx_free_ring_dqo(priv, i);
+       }
+}
+
+/* Returns the number of slots available in the ring */
+static u32 num_avail_tx_slots(const struct gve_tx_ring *tx)
+{
+       u32 num_used = (tx->dqo_tx.tail - tx->dqo_tx.head) & tx->mask;
+
+       return tx->mask - num_used;
+}
+
+/* Stops the queue if the number of available descriptors is less than
+ * 'count'. Return: 0 if stopping is not required.
+ */
+static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx, int count)
+{
+       if (likely(gve_has_pending_packet(tx) &&
+                  num_avail_tx_slots(tx) >= count))
+               return 0;
+
+       /* Update cached TX head pointer */
+       tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
+
+       if (likely(gve_has_pending_packet(tx) &&
+                  num_avail_tx_slots(tx) >= count))
+               return 0;
+
+       /* No space, so stop the queue */
+       tx->stop_queue++;
+       netif_tx_stop_queue(tx->netdev_txq);
+
+       /* Sync with restarting queue in `gve_tx_poll_dqo()` */
+       mb();
+
+       /* After stopping the queue, check whether we can transmit again in
+        * order to avoid a TOCTOU race.
+        */
+       tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
+
+       if (likely(!gve_has_pending_packet(tx) ||
+                  num_avail_tx_slots(tx) < count))
+               return -EBUSY;
+
+       netif_tx_start_queue(tx->netdev_txq);
+       tx->wake_queue++;
+       return 0;
+}
+
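+/* The stop-then-recheck sequence above pairs with the completion side,
+ * which advances hw_tx_head and may wake the queue. Without re-reading the
+ * head after netif_tx_stop_queue() plus the full barrier, a completion
+ * landing in that window could find the queue not yet stopped, skip the
+ * wake, and leave it stopped forever. A sketch of the expected wake side
+ * (illustrative only; the real code lives in the DQO TX poll path):
+ *
+ *	atomic_set_release(&tx->dqo_compl.hw_tx_head, tx_head);
+ *	smp_mb();
+ *	if (netif_tx_queue_stopped(tx->netdev_txq) &&
+ *	    num_avail_tx_slots(tx) >= GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP)
+ *		netif_tx_wake_queue(tx->netdev_txq);
+ */
+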
+static void gve_extract_tx_metadata_dqo(const struct sk_buff *skb,
+                                       struct gve_tx_metadata_dqo *metadata)
+{
+       memset(metadata, 0, sizeof(*metadata));
+       metadata->version = GVE_TX_METADATA_VERSION_DQO;
+
+       if (skb->l4_hash) {
+               u16 path_hash = skb->hash ^ (skb->hash >> 16);
+
+               path_hash &= (1 << 15) - 1;
+               if (unlikely(path_hash == 0))
+                       path_hash = ~path_hash;
+
+               metadata->path_hash = path_hash;
+       }
+}
+
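+/* The fold above compresses the 32-bit flow hash into the metadata's
+ * smaller path_hash field: XOR the two 16-bit halves, mask to 15 bits, and
+ * remap an accidental 0 to a nonzero value so 0 can keep meaning "no hash".
+ * E.g. skb->hash = 0x12345678 folds to 0x1234 ^ 0x5678 = 0x444c, which the
+ * 15-bit mask leaves unchanged.
+ */
+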
+static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx,
+                                    struct sk_buff *skb, u32 len, u64 addr,
+                                    s16 compl_tag, bool eop, bool is_gso)
+{
+       const bool checksum_offload_en = skb->ip_summed == CHECKSUM_PARTIAL;
+
+       while (len > 0) {
+               struct gve_tx_pkt_desc_dqo *desc =
+                       &tx->dqo.tx_ring[*desc_idx].pkt;
+               u32 cur_len = min_t(u32, len, GVE_TX_MAX_BUF_SIZE_DQO);
+               bool cur_eop = eop && cur_len == len;
+
+               *desc = (struct gve_tx_pkt_desc_dqo){
+                       .buf_addr = cpu_to_le64(addr),
+                       .dtype = GVE_TX_PKT_DESC_DTYPE_DQO,
+                       .end_of_packet = cur_eop,
+                       .checksum_offload_enable = checksum_offload_en,
+                       .compl_tag = cpu_to_le16(compl_tag),
+                       .buf_size = cur_len,
+               };
+
+               addr += cur_len;
+               len -= cur_len;
+               *desc_idx = (*desc_idx + 1) & tx->mask;
+       }
+}
+
+/* Validates and prepares `skb` for TSO.
+ *
+ * Returns header length, or < 0 if invalid.
+ */
+static int gve_prep_tso(struct sk_buff *skb)
+{
+       struct tcphdr *tcp;
+       int header_len;
+       u32 paylen;
+       int err;
+
+       /* Note: HW requires MSS (gso_size) to be <= 9728 and the total length
+        * of the TSO to be <= 262143.
+        *
+        * However, we don't validate these because:
+        * - Hypervisor enforces a limit of 9K MTU
+        * - Kernel will not produce a TSO larger than 64k
+        */
+
+       if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO))
+               return -1;
+
+       /* Needed because we will modify header. */
+       err = skb_cow_head(skb, 0);
+       if (err < 0)
+               return err;
+
+       tcp = tcp_hdr(skb);
+
+       /* Remove payload length from checksum. */
+       paylen = skb->len - skb_transport_offset(skb);
+
+       switch (skb_shinfo(skb)->gso_type) {
+       case SKB_GSO_TCPV4:
+       case SKB_GSO_TCPV6:
+               csum_replace_by_diff(&tcp->check,
+                                    (__force __wsum)htonl(paylen));
+
+               /* Compute length of segmentation header. */
+               header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO))
+               return -EINVAL;
+
+       return header_len;
+}
+
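+/* Concretely, for an IPv4 TCP packet with no options the returned header
+ * length is 14 (Ethernet) + 20 (IP) + 20 (TCP) = 54 bytes; everything past
+ * that offset is payload that HW re-segments, which is also why the payload
+ * length has to be subtracted back out of the TCP pseudo-header checksum
+ * above before handing the skb to HW.
+ */
+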
+static void gve_tx_fill_tso_ctx_desc(struct gve_tx_tso_context_desc_dqo *desc,
+                                    const struct sk_buff *skb,
+                                    const struct gve_tx_metadata_dqo *metadata,
+                                    int header_len)
+{
+       *desc = (struct gve_tx_tso_context_desc_dqo){
+               .header_len = header_len,
+               .cmd_dtype = {
+                       .dtype = GVE_TX_TSO_CTX_DESC_DTYPE_DQO,
+                       .tso = 1,
+               },
+               .flex0 = metadata->bytes[0],
+               .flex5 = metadata->bytes[5],
+               .flex6 = metadata->bytes[6],
+               .flex7 = metadata->bytes[7],
+               .flex8 = metadata->bytes[8],
+               .flex9 = metadata->bytes[9],
+               .flex10 = metadata->bytes[10],
+               .flex11 = metadata->bytes[11],
+       };
+       desc->tso_total_len = skb->len - header_len;
+       desc->mss = skb_shinfo(skb)->gso_size;
+}
+
+static void
+gve_tx_fill_general_ctx_desc(struct gve_tx_general_context_desc_dqo *desc,
+                            const struct gve_tx_metadata_dqo *metadata)
+{
+       *desc = (struct gve_tx_general_context_desc_dqo){
+               .flex0 = metadata->bytes[0],
+               .flex1 = metadata->bytes[1],
+               .flex2 = metadata->bytes[2],
+               .flex3 = metadata->bytes[3],
+               .flex4 = metadata->bytes[4],
+               .flex5 = metadata->bytes[5],
+               .flex6 = metadata->bytes[6],
+               .flex7 = metadata->bytes[7],
+               .flex8 = metadata->bytes[8],
+               .flex9 = metadata->bytes[9],
+               .flex10 = metadata->bytes[10],
+               .flex11 = metadata->bytes[11],
+               .cmd_dtype = {.dtype = GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO},
+       };
+}
+
+/* Returns 0 on success, or < 0 on error.
+ *
+ * Before this function is called, the caller must ensure
+ * gve_has_pending_packet(tx) returns true.
+ */
+static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
+                                     struct sk_buff *skb)
+{
+       const struct skb_shared_info *shinfo = skb_shinfo(skb);
+       const bool is_gso = skb_is_gso(skb);
+       u32 desc_idx = tx->dqo_tx.tail;
+
+       struct gve_tx_pending_packet_dqo *pending_packet;
+       struct gve_tx_metadata_dqo metadata;
+       s16 completion_tag;
+       int i;
+
+       pending_packet = gve_alloc_pending_packet(tx);
+       pending_packet->skb = skb;
+       pending_packet->num_bufs = 0;
+       completion_tag = pending_packet - tx->dqo.pending_packets;
+
+       gve_extract_tx_metadata_dqo(skb, &metadata);
+       if (is_gso) {
+               int header_len = gve_prep_tso(skb);
+
+               if (unlikely(header_len < 0))
+                       goto err;
+
+               gve_tx_fill_tso_ctx_desc(&tx->dqo.tx_ring[desc_idx].tso_ctx,
+                                        skb, &metadata, header_len);
+               desc_idx = (desc_idx + 1) & tx->mask;
+       }
+
+       gve_tx_fill_general_ctx_desc(&tx->dqo.tx_ring[desc_idx].general_ctx,
+                                    &metadata);
+       desc_idx = (desc_idx + 1) & tx->mask;
+
+       /* Note: HW requires that the size of a non-TSO packet be within the
+        * range of [17, 9728].
+        *
+        * We don't double check because
+        * - We limited `netdev->min_mtu` to ETH_MIN_MTU.
+        * - Hypervisor won't allow MTU larger than 9216.
+        */
+
+       /* Map the linear portion of skb */
+       {
+               struct gve_tx_dma_buf *buf =
+                       &pending_packet->bufs[pending_packet->num_bufs];
+               u32 len = skb_headlen(skb);
+               dma_addr_t addr;
+
+               addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(tx->dev, addr)))
+                       goto err;
+
+               dma_unmap_len_set(buf, len, len);
+               dma_unmap_addr_set(buf, dma, addr);
+               ++pending_packet->num_bufs;
+
+               gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
+                                        completion_tag,
+                                        /*eop=*/shinfo->nr_frags == 0, is_gso);
+       }
+
+       for (i = 0; i < shinfo->nr_frags; i++) {
+               struct gve_tx_dma_buf *buf =
+                       &pending_packet->bufs[pending_packet->num_bufs];
+               const skb_frag_t *frag = &shinfo->frags[i];
+               bool is_eop = i == (shinfo->nr_frags - 1);
+               u32 len = skb_frag_size(frag);
+               dma_addr_t addr;
+
+               addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(tx->dev, addr)))
+                       goto err;
+
+               dma_unmap_len_set(buf, len, len);
+               dma_unmap_addr_set(buf, dma, addr);
+               ++pending_packet->num_bufs;
+
+               gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
+                                        completion_tag, is_eop, is_gso);
+       }
+
+       /* Commit the changes to our state */
+       tx->dqo_tx.tail = desc_idx;
+
+       /* Request a descriptor completion on the last descriptor of the
+        * packet if we are allowed to by the HW-enforced interval.
+        */
+       {
+               u32 last_desc_idx = (desc_idx - 1) & tx->mask;
+               u32 last_report_event_interval =
+                       (last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask;
+
+               if (unlikely(last_report_event_interval >=
+                            GVE_TX_MIN_RE_INTERVAL)) {
+                       tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true;
+                       tx->dqo_tx.last_re_idx = last_desc_idx;
+               }
+       }
+
+       return 0;
+
+err:
+       for (i = 0; i < pending_packet->num_bufs; i++) {
+               struct gve_tx_dma_buf *buf = &pending_packet->bufs[i];
+
+               if (i == 0) {
+                       dma_unmap_single(tx->dev, dma_unmap_addr(buf, dma),
+                                        dma_unmap_len(buf, len),
+                                        DMA_TO_DEVICE);
+               } else {
+                       dma_unmap_page(tx->dev, dma_unmap_addr(buf, dma),
+                                      dma_unmap_len(buf, len), DMA_TO_DEVICE);
+               }
+       }
+
+       pending_packet->skb = NULL;
+       pending_packet->num_bufs = 0;
+       gve_free_pending_packet(tx, pending_packet);
+
+       return -1;
+}
+
+static int gve_num_descs_per_buf(size_t size)
+{
+       return DIV_ROUND_UP(size, GVE_TX_MAX_BUF_SIZE_DQO);
+}
+
+static int gve_num_buffer_descs_needed(const struct sk_buff *skb)
+{
+       const struct skb_shared_info *shinfo = skb_shinfo(skb);
+       int num_descs;
+       int i;
+
+       num_descs = gve_num_descs_per_buf(skb_headlen(skb));
+
+       for (i = 0; i < shinfo->nr_frags; i++) {
+               unsigned int frag_size = skb_frag_size(&shinfo->frags[i]);
+
+               num_descs += gve_num_descs_per_buf(frag_size);
+       }
+
+       return num_descs;
+}
+
+/* Returns true if HW is capable of sending TSO represented by `skb`.
+ *
+ * Each segment must not span more than GVE_TX_MAX_DATA_DESCS buffers.
+ * - The header is counted as one buffer for every single segment.
+ * - A buffer which is split between two segments is counted for both.
+ * - If a buffer contains both header and payload, it is counted as two buffers.
+ */
+static bool gve_can_send_tso(const struct sk_buff *skb)
+{
+       const int header_len = skb_checksum_start_offset(skb) + tcp_hdrlen(skb);
+       const int max_bufs_per_seg = GVE_TX_MAX_DATA_DESCS - 1;
+       const struct skb_shared_info *shinfo = skb_shinfo(skb);
+       const int gso_size = shinfo->gso_size;
+       int cur_seg_num_bufs;
+       int cur_seg_size;
+       int i;
+
+       cur_seg_size = skb_headlen(skb) - header_len;
+       cur_seg_num_bufs = cur_seg_size > 0;
+
+       for (i = 0; i < shinfo->nr_frags; i++) {
+               if (cur_seg_size >= gso_size) {
+                       cur_seg_size %= gso_size;
+                       cur_seg_num_bufs = cur_seg_size > 0;
+               }
+
+               if (unlikely(++cur_seg_num_bufs > max_bufs_per_seg))
+                       return false;
+
+               cur_seg_size += skb_frag_size(&shinfo->frags[i]);
+       }
+
+       return true;
+}
+
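+/* Worked example: gso_size = 1000, a header-only linear area, and two
+ * frags of 2500 and 1500 bytes. Segments 1 and 2 each take 1000 bytes from
+ * frag 1 (one buffer each); segment 3 straddles the last 500 bytes of frag
+ * 1 and the first 500 of frag 2, so it costs two buffers, the straddling
+ * frag counting against both segments it touches. Any segment needing more
+ * than GVE_TX_MAX_DATA_DESCS - 1 buffers (the header reserves one) makes
+ * the skb ineligible for no-copy TSO.
+ */
+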
+/* Attempt to transmit the specified SKB.
+ *
+ * Returns 0 if the SKB was transmitted or dropped.
+ * Returns -1 if there is not currently enough space to transmit the SKB.
+ */
+static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
+                         struct sk_buff *skb)
+{
+       int num_buffer_descs;
+       int total_num_descs;
+
+       if (skb_is_gso(skb)) {
+               /* If TSO doesn't meet HW requirements, attempt to linearize the
+                * packet.
+                */
+               if (unlikely(!gve_can_send_tso(skb) &&
+                            skb_linearize(skb) < 0)) {
+                       net_err_ratelimited("%s: Failed to transmit TSO packet\n",
+                                           priv->dev->name);
+                       goto drop;
+               }
+
+               num_buffer_descs = gve_num_buffer_descs_needed(skb);
+       } else {
+               num_buffer_descs = gve_num_buffer_descs_needed(skb);
+
+               if (unlikely(num_buffer_descs > GVE_TX_MAX_DATA_DESCS)) {
+                       if (unlikely(skb_linearize(skb) < 0))
+                               goto drop;
+
+                       num_buffer_descs = 1;
+               }
+       }
+
+       /* Metadata + (optional TSO) + data descriptors. */
+       total_num_descs = 1 + skb_is_gso(skb) + num_buffer_descs;
+       if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs +
+                       GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP))) {
+               return -1;
+       }
+
+       if (unlikely(gve_tx_add_skb_no_copy_dqo(tx, skb) < 0))
+               goto drop;
+
+       netdev_tx_sent_queue(tx->netdev_txq, skb->len);
+       skb_tx_timestamp(skb);
+       return 0;
+
+drop:
+       tx->dropped_pkt++;
+       dev_kfree_skb_any(skb);
+       return 0;
+}
+
+/* Transmit a given skb and ring the doorbell. */
+netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev)
+{
+       struct gve_priv *priv = netdev_priv(dev);
+       struct gve_tx_ring *tx;
+
+       tx = &priv->tx[skb_get_queue_mapping(skb)];
+       if (unlikely(gve_try_tx_skb(priv, tx, skb) < 0)) {
+               /* We need to ring the txq doorbell -- we have stopped the Tx
+                * queue for want of resources, but prior calls to gve_tx()
+                * may have added descriptors without ringing the doorbell.
+                */
+               gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+               return NETDEV_TX_BUSY;
+       }
+
+       if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
+               return NETDEV_TX_OK;
+
+       gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+       return NETDEV_TX_OK;
+}
+
+static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list,
+                       struct gve_tx_pending_packet_dqo *pending_packet)
+{
+       s16 old_tail, index;
+
+       index = pending_packet - tx->dqo.pending_packets;
+       old_tail = list->tail;
+       list->tail = index;
+       if (old_tail == -1)
+               list->head = index;
+       else
+               tx->dqo.pending_packets[old_tail].next = index;
+
+       pending_packet->next = -1;
+       pending_packet->prev = old_tail;
+}
+
+static void remove_from_list(struct gve_tx_ring *tx,
+                            struct gve_index_list *list,
+                            struct gve_tx_pending_packet_dqo *pending_packet)
+{
+       s16 prev_index, next_index;
+
+       prev_index = pending_packet->prev;
+       next_index = pending_packet->next;
+
+       if (prev_index == -1) {
+               /* Node is head */
+               list->head = next_index;
+       } else {
+               tx->dqo.pending_packets[prev_index].next = next_index;
+       }
+       if (next_index == -1) {
+               /* Node is tail */
+               list->tail = prev_index;
+       } else {
+               tx->dqo.pending_packets[next_index].prev = prev_index;
+       }
+}
+
+static void gve_unmap_packet(struct device *dev,
+                            struct gve_tx_pending_packet_dqo *pending_packet)
+{
+       struct gve_tx_dma_buf *buf;
+       int i;
+
+       /* SKB linear portion is guaranteed to be mapped */
+       buf = &pending_packet->bufs[0];
+       dma_unmap_single(dev, dma_unmap_addr(buf, dma),
+                        dma_unmap_len(buf, len), DMA_TO_DEVICE);
+       for (i = 1; i < pending_packet->num_bufs; i++) {
+               buf = &pending_packet->bufs[i];
+               dma_unmap_page(dev, dma_unmap_addr(buf, dma),
+                              dma_unmap_len(buf, len), DMA_TO_DEVICE);
+       }
+       pending_packet->num_bufs = 0;
+}
+
+/* Completion types and expected behavior:
+ * No Miss compl + Packet compl = Packet completed normally.
+ * Miss compl + Re-inject compl = Packet completed normally.
+ * No Miss compl + Re-inject compl = Skipped, i.e. packet not completed.
+ * Miss compl + Packet compl = Skipped, i.e. packet not completed.
+ */
+static void gve_handle_packet_completion(struct gve_priv *priv,
+                                        struct gve_tx_ring *tx, bool is_napi,
+                                        u16 compl_tag, u64 *bytes, u64 *pkts,
+                                        bool is_reinjection)
+{
+       struct gve_tx_pending_packet_dqo *pending_packet;
+
+       if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) {
+               net_err_ratelimited("%s: Invalid TX completion tag: %d\n",
+                                   priv->dev->name, (int)compl_tag);
+               return;
+       }
+
+       pending_packet = &tx->dqo.pending_packets[compl_tag];
+
+       if (unlikely(is_reinjection)) {
+               if (unlikely(pending_packet->state ==
+                            GVE_PACKET_STATE_TIMED_OUT_COMPL)) {
+                       net_err_ratelimited("%s: Re-injection completion: %d received after timeout.\n",
+                                           priv->dev->name, (int)compl_tag);
+                       /* Packet was already completed as a result of timeout,
+                        * so just remove from list and free pending packet.
+                        */
+                       remove_from_list(tx,
+                                        &tx->dqo_compl.timed_out_completions,
+                                        pending_packet);
+                       gve_free_pending_packet(tx, pending_packet);
+                       return;
+               }
+               if (unlikely(pending_packet->state !=
+                            GVE_PACKET_STATE_PENDING_REINJECT_COMPL)) {
+                       /* A packet that is allocated but has no outstanding
+                        * miss completion has received a re-injection
+                        * completion without a prior miss completion. Return
+                        * without completing the packet.
+                        */
+                       net_err_ratelimited("%s: Re-injection completion received without corresponding miss completion: %d\n",
+                                           priv->dev->name, (int)compl_tag);
+                       return;
+               }
+               remove_from_list(tx, &tx->dqo_compl.miss_completions,
+                                pending_packet);
+       } else {
+               /* Packet is allocated but not a pending data completion. */
+               if (unlikely(pending_packet->state !=
+                            GVE_PACKET_STATE_PENDING_DATA_COMPL)) {
+                       net_err_ratelimited("%s: No pending data completion: %d\n",
+                                           priv->dev->name, (int)compl_tag);
+                       return;
+               }
+       }
+       gve_unmap_packet(tx->dev, pending_packet);
+
+       *bytes += pending_packet->skb->len;
+       (*pkts)++;
+       napi_consume_skb(pending_packet->skb, is_napi);
+       pending_packet->skb = NULL;
+       gve_free_pending_packet(tx, pending_packet);
+}
+
+static void gve_handle_miss_completion(struct gve_priv *priv,
+                                      struct gve_tx_ring *tx, u16 compl_tag,
+                                      u64 *bytes, u64 *pkts)
+{
+       struct gve_tx_pending_packet_dqo *pending_packet;
+
+       if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) {
+               net_err_ratelimited("%s: Invalid TX completion tag: %d\n",
+                                   priv->dev->name, (int)compl_tag);
+               return;
+       }
+
+       pending_packet = &tx->dqo.pending_packets[compl_tag];
+       if (unlikely(pending_packet->state !=
+                               GVE_PACKET_STATE_PENDING_DATA_COMPL)) {
+               net_err_ratelimited("%s: Unexpected packet state: %d for completion tag : %d\n",
+                                   priv->dev->name, (int)pending_packet->state,
+                                   (int)compl_tag);
+               return;
+       }
+
+       pending_packet->state = GVE_PACKET_STATE_PENDING_REINJECT_COMPL;
+       /* jiffies can wrap around, but the time comparison macros handle overflow. */
+       pending_packet->timeout_jiffies =
+                       jiffies +
+                       msecs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT *
+                                        MSEC_PER_SEC);
+       add_to_list(tx, &tx->dqo_compl.miss_completions, pending_packet);
+
+       *bytes += pending_packet->skb->len;
+       (*pkts)++;
+}
+
+static void remove_miss_completions(struct gve_priv *priv,
+                                   struct gve_tx_ring *tx)
+{
+       struct gve_tx_pending_packet_dqo *pending_packet;
+       s16 next_index;
+
+       next_index = tx->dqo_compl.miss_completions.head;
+       while (next_index != -1) {
+               pending_packet = &tx->dqo.pending_packets[next_index];
+               next_index = pending_packet->next;
+               /* Break early because packets should time out in order. */
+               if (time_is_after_jiffies(pending_packet->timeout_jiffies))
+                       break;
+
+               remove_from_list(tx, &tx->dqo_compl.miss_completions,
+                                pending_packet);
+               /* Unmap the buffers and free the skb, but do not release the
+                * pending packet, i.e. the completion tag stays allocated so
+                * that the driver can take appropriate action if a
+                * corresponding valid completion arrives later.
+                */
+               gve_unmap_packet(tx->dev, pending_packet);
+               /* This indicates the packet was dropped. */
+               dev_kfree_skb_any(pending_packet->skb);
+               pending_packet->skb = NULL;
+               tx->dropped_pkt++;
+               net_err_ratelimited("%s: No reinjection completion was received for: %d.\n",
+                                   priv->dev->name,
+                                   (int)(pending_packet - tx->dqo.pending_packets));
+
+               pending_packet->state = GVE_PACKET_STATE_TIMED_OUT_COMPL;
+               pending_packet->timeout_jiffies =
+                               jiffies +
+                               msecs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT *
+                                                MSEC_PER_SEC);
+               /* Keep the pending packet on another list so that it can be
+                * released at a later time.
+                */
+               add_to_list(tx, &tx->dqo_compl.timed_out_completions,
+                           pending_packet);
+       }
+}
+
+static void remove_timed_out_completions(struct gve_priv *priv,
+                                        struct gve_tx_ring *tx)
+{
+       struct gve_tx_pending_packet_dqo *pending_packet;
+       s16 next_index;
+
+       next_index = tx->dqo_compl.timed_out_completions.head;
+       while (next_index != -1) {
+               pending_packet = &tx->dqo.pending_packets[next_index];
+               next_index = pending_packet->next;
+               /* Break early because packets should time out in order. */
+               if (time_is_after_jiffies(pending_packet->timeout_jiffies))
+                       break;
+
+               remove_from_list(tx, &tx->dqo_compl.timed_out_completions,
+                                pending_packet);
+               gve_free_pending_packet(tx, pending_packet);
+       }
+}
+
+int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+                         struct napi_struct *napi)
+{
+       u64 reinject_compl_bytes = 0;
+       u64 reinject_compl_pkts = 0;
+       int num_descs_cleaned = 0;
+       u64 miss_compl_bytes = 0;
+       u64 miss_compl_pkts = 0;
+       u64 pkt_compl_bytes = 0;
+       u64 pkt_compl_pkts = 0;
+
+       /* Limit in order to avoid blocking for too long */
+       while (!napi || pkt_compl_pkts < napi->weight) {
+               struct gve_tx_compl_desc *compl_desc =
+                       &tx->dqo.compl_ring[tx->dqo_compl.head];
+               u16 type;
+
+               if (compl_desc->generation == tx->dqo_compl.cur_gen_bit)
+                       break;
+
+               /* Prefetch the next descriptor. */
+               prefetch(&tx->dqo.compl_ring[(tx->dqo_compl.head + 1) &
+                               tx->dqo.complq_mask]);
+
+               /* Do not read data until we own the descriptor */
+               dma_rmb();
+               type = compl_desc->type;
+
+               if (type == GVE_COMPL_TYPE_DQO_DESC) {
+                       /* This is the last descriptor fetched by HW plus one */
+                       u16 tx_head = le16_to_cpu(compl_desc->tx_head);
+
+                       atomic_set_release(&tx->dqo_compl.hw_tx_head, tx_head);
+               } else if (type == GVE_COMPL_TYPE_DQO_PKT) {
+                       u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);
+
+                       gve_handle_packet_completion(priv, tx, !!napi,
+                                                    compl_tag,
+                                                    &pkt_compl_bytes,
+                                                    &pkt_compl_pkts,
+                                                    /*is_reinjection=*/false);
+               } else if (type == GVE_COMPL_TYPE_DQO_MISS) {
+                       u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);
+
+                       gve_handle_miss_completion(priv, tx, compl_tag,
+                                                  &miss_compl_bytes,
+                                                  &miss_compl_pkts);
+               } else if (type == GVE_COMPL_TYPE_DQO_REINJECTION) {
+                       u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);
+
+                       gve_handle_packet_completion(priv, tx, !!napi,
+                                                    compl_tag,
+                                                    &reinject_compl_bytes,
+                                                    &reinject_compl_pkts,
+                                                    /*is_reinjection=*/true);
+               }
+
+               tx->dqo_compl.head =
+                       (tx->dqo_compl.head + 1) & tx->dqo.complq_mask;
+               /* Flip the generation bit when we wrap around */
+               tx->dqo_compl.cur_gen_bit ^= tx->dqo_compl.head == 0;
+               num_descs_cleaned++;
+       }
+
+       netdev_tx_completed_queue(tx->netdev_txq,
+                                 pkt_compl_pkts + miss_compl_pkts,
+                                 pkt_compl_bytes + miss_compl_bytes);
+
+       remove_miss_completions(priv, tx);
+       remove_timed_out_completions(priv, tx);
+
+       u64_stats_update_begin(&tx->statss);
+       tx->bytes_done += pkt_compl_bytes + reinject_compl_bytes;
+       tx->pkt_done += pkt_compl_pkts + reinject_compl_pkts;
+       u64_stats_update_end(&tx->statss);
+       return num_descs_cleaned;
+}
+
+bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean)
+{
+       struct gve_tx_compl_desc *compl_desc;
+       struct gve_tx_ring *tx = block->tx;
+       struct gve_priv *priv = block->priv;
+
+       if (do_clean) {
+               int num_descs_cleaned = gve_clean_tx_done_dqo(priv, tx,
+                                                             &block->napi);
+
+               /* Sync with queue being stopped in `gve_maybe_stop_tx_dqo()` */
+               mb();
+
+               if (netif_tx_queue_stopped(tx->netdev_txq) &&
+                   num_descs_cleaned > 0) {
+                       tx->wake_queue++;
+                       netif_tx_wake_queue(tx->netdev_txq);
+               }
+       }
+
+       /* Return true if we still have work. */
+       compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head];
+       return compl_desc->generation != tx->dqo_compl.cur_gen_bit;
+}
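
The ownership test in gve_clean_tx_done_dqo() and gve_tx_poll_dqo() above is the classic generation-bit scheme: the driver flips cur_gen_bit each time the completion head wraps to zero, so a descriptor whose generation field still equals cur_gen_bit has not yet been written by hardware on this lap. A minimal standalone sketch of the idiom follows; the types are illustrative, not the gve definitions, and the real driver additionally issues dma_rmb() between the ownership check and reading the payload.

#include <linux/types.h>

/* Illustrative completion-ring types; stands in for the gve structures. */
struct compl_desc {
	u8 generation;		/* written by HW together with the payload */
};

struct compl_ring {
	struct compl_desc *descs;
	u32 mask;		/* ring size minus one (size is a power of two) */
	u32 head;		/* next descriptor software will read */
	u8 cur_gen_bit;		/* generation value HW has NOT yet written */
};

/* True if the descriptor at head has been completed by hardware. */
static bool ring_desc_ready(const struct compl_ring *r)
{
	return r->descs[r->head].generation != r->cur_gen_bit;
}

static void ring_advance(struct compl_ring *r)
{
	r->head = (r->head + 1) & r->mask;
	/* Flip the expected generation when the ring wraps so stale
	 * descriptors from the previous lap are not mistaken for new ones.
	 */
	r->cur_gen_bit ^= (r->head == 0);
}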
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
new file mode 100644 (file)
index 0000000..93f3dcb
--- /dev/null
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2021 Google, Inc.
+ */
+
+#include "gve.h"
+#include "gve_adminq.h"
+#include "gve_utils.h"
+
+void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
+{
+       struct gve_notify_block *block =
+                       &priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];
+
+       block->tx = NULL;
+}
+
+void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
+{
+       int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
+       struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+       struct gve_tx_ring *tx = &priv->tx[queue_idx];
+
+       block->tx = tx;
+       tx->ntfy_id = ntfy_idx;
+}
+
+void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
+{
+       struct gve_notify_block *block =
+                       &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];
+
+       block->rx = NULL;
+}
+
+void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
+{
+       u32 ntfy_idx = gve_rx_idx_to_ntfy(priv, queue_idx);
+       struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+       struct gve_rx_ring *rx = &priv->rx[queue_idx];
+
+       block->rx = rx;
+       rx->ntfy_id = ntfy_idx;
+}
+
+struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
+                           struct gve_rx_slot_page_info *page_info, u16 len,
+                           u16 pad)
+{
+       struct sk_buff *skb = napi_alloc_skb(napi, len);
+       void *va = page_info->page_address + pad +
+                  page_info->page_offset;
+
+       if (unlikely(!skb))
+               return NULL;
+
+       __skb_put(skb, len);
+
+       skb_copy_to_linear_data(skb, va, len);
+
+       skb->protocol = eth_type_trans(skb, dev);
+
+       return skb;
+}
+
+void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
+{
+       page_info->pagecnt_bias--;
+       if (page_info->pagecnt_bias == 0) {
+               int pagecount = page_count(page_info->page);
+
+               /* If we have run out of bias - set it back up to INT_MAX
+                * minus the existing refs.
+                */
+               page_info->pagecnt_bias = INT_MAX - pagecount;
+
+               /* Set pagecount back up to max. */
+               page_ref_add(page_info->page, INT_MAX - pagecount);
+       }
+}
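
gve_dec_pagecnt_bias() implements the common page-recycling optimization: the driver pre-pays a large batch of page references once with page_ref_add() and then hands out references by decrementing a cheap, driver-private pagecnt_bias counter, touching the atomic refcount again only when the bias runs out. A sketch of the scheme with an illustrative wrapper type; page_count() and page_ref_add() are the real mm helpers.

#include <linux/mm.h>

/* Illustrative tracking struct; stands in for gve_rx_slot_page_info. */
struct recycled_page {
	struct page *page;
	int pagecnt_bias;	/* references pre-paid via page_ref_add() */
};

/* One-time setup: take a large batch of references up front. */
static void recycled_page_init(struct recycled_page *pi, struct page *page)
{
	pi->page = page;
	pi->pagecnt_bias = INT_MAX - page_count(page);
	page_ref_add(page, pi->pagecnt_bias);
}

/* Per-packet fast path: consume one pre-paid reference, no atomics. */
static void recycled_page_get(struct recycled_page *pi)
{
	if (--pi->pagecnt_bias == 0) {
		int pagecount = page_count(pi->page);

		/* Bias exhausted: top the refcount back up toward INT_MAX
		 * in a single atomic operation and reset the bias to match.
		 */
		pi->pagecnt_bias = INT_MAX - pagecount;
		page_ref_add(pi->page, INT_MAX - pagecount);
	}
}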
diff --git a/drivers/net/ethernet/google/gve/gve_utils.h b/drivers/net/ethernet/google/gve/gve_utils.h
new file mode 100644 (file)
index 0000000..7959594
--- /dev/null
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ * Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2021 Google, Inc.
+ */
+
+#ifndef _GVE_UTILS_H
+#define _GVE_UTILS_H
+
+#include <linux/etherdevice.h>
+
+#include "gve.h"
+
+void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx);
+void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx);
+
+void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx);
+void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
+
+struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
+                           struct gve_rx_slot_page_info *page_info, u16 len,
+                           u16 pad);
+
+/* Decrement pagecnt_bias; refill it and the page refcount once it hits zero. */
+void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info);
+
+#endif /* _GVE_UTILS_H */
+
index 0b202f4..e0b7c3c 100644 (file)
@@ -290,8 +290,10 @@ enum hnae3_dbg_cmd {
        HNAE3_DBG_CMD_RX_QUEUE_INFO,
        HNAE3_DBG_CMD_TX_QUEUE_INFO,
        HNAE3_DBG_CMD_FD_TCAM,
+       HNAE3_DBG_CMD_FD_COUNTER,
        HNAE3_DBG_CMD_MAC_TNL_STATUS,
        HNAE3_DBG_CMD_SERV_INFO,
+       HNAE3_DBG_CMD_UMV_INFO,
        HNAE3_DBG_CMD_UNKNOWN,
 };
 
index 34b6cd9..5325230 100644 (file)
@@ -323,6 +323,20 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
                .buf_len = HNS3_DBG_READ_LEN,
                .init = hns3_dbg_common_file_init,
        },
+       {
+               .name = "fd_counter",
+               .cmd = HNAE3_DBG_CMD_FD_COUNTER,
+               .dentry = HNS3_DBG_DENTRY_FD,
+               .buf_len = HNS3_DBG_READ_LEN,
+               .init = hns3_dbg_common_file_init,
+       },
+       {
+               .name = "umv_info",
+               .cmd = HNAE3_DBG_CMD_UMV_INFO,
+               .dentry = HNS3_DBG_DENTRY_COMMON,
+               .buf_len = HNS3_DBG_READ_LEN,
+               .init = hns3_dbg_common_file_init,
+       },
 };
 
 static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
index 51bbf5f..cdb5f14 100644 (file)
@@ -3537,21 +3537,33 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
        int size = le16_to_cpu(desc->rx.size);
        u32 truesize = hns3_buf_size(ring);
        u32 frag_size = size - pull_len;
+       bool reused;
 
        /* Avoid re-using remote or pfmem page */
        if (unlikely(!dev_page_is_reusable(desc_cb->priv)))
                goto out;
 
-       /* Stack is not using and current page_offset is non-zero, we can
-        * reuse from the zero offset.
+       reused = hns3_can_reuse_page(desc_cb);
+
+       /* Rx page can be reused when:
+        * 1. Rx page is only owned by the driver when page_offset
+        *    is zero, which means 0 @ truesize will be used by
+        *    stack after skb_add_rx_frag() is called, and the rest
+        *    of rx page can be reused by driver.
+        * Or
+        * 2. Rx page is only owned by the driver when page_offset
+        *    is non-zero, which means page_offset @ truesize will
+        *    be used by stack after skb_add_rx_frag() is called,
+        *    and 0 @ truesize can be reused by driver.
         */
-       if (desc_cb->page_offset && hns3_can_reuse_page(desc_cb)) {
-               desc_cb->page_offset = 0;
-               desc_cb->reuse_flag = 1;
-       } else if (desc_cb->page_offset + truesize * 2 <=
-                  hns3_page_size(ring)) {
+       if ((!desc_cb->page_offset && reused) ||
+           ((desc_cb->page_offset + truesize + truesize) <=
+            hns3_page_size(ring) && desc_cb->page_offset)) {
                desc_cb->page_offset += truesize;
                desc_cb->reuse_flag = 1;
+       } else if (desc_cb->page_offset && reused) {
+               desc_cb->page_offset = 0;
+               desc_cb->reuse_flag = 1;
        } else if (frag_size <= ring->rx_copybreak) {
                void *frag = napi_alloc_frag(frag_size);
 
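
The rewritten condition above lets the driver ping-pong an rx page between its two truesize halves: once the stack has released whichever half it was using (hns3_can_reuse_page() in the real code), the other half is handed back to hardware. A condensed sketch of that decision, assuming a stripped-down control-block type:

#include <linux/types.h>

/* Stripped-down stand-in for the hns3 desc_cb. */
struct rx_desc_cb {
	u32 page_offset;
	u32 reuse_flag;
};

/* 'reused' means the driver is the page's only remaining owner. */
static void rx_page_reuse(struct rx_desc_cb *cb, u32 truesize,
			  u32 page_size, bool reused)
{
	if ((!cb->page_offset && reused) ||
	    (cb->page_offset &&
	     cb->page_offset + 2 * truesize <= page_size)) {
		/* Advance to the next free chunk of the page. */
		cb->page_offset += truesize;
		cb->reuse_flag = 1;
	} else if (cb->page_offset && reused) {
		/* Stack is done with the tail half; restart at offset 0. */
		cb->page_offset = 0;
		cb->reuse_flag = 1;
	}
	/* Otherwise: fall back to copybreak or release the page (not shown). */
}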
index a322dfe..18bde77 100644 (file)
@@ -248,6 +248,7 @@ enum hclge_opcode_type {
        HCLGE_OPC_FD_KEY_CONFIG         = 0x1202,
        HCLGE_OPC_FD_TCAM_OP            = 0x1203,
        HCLGE_OPC_FD_AD_OP              = 0x1204,
+       HCLGE_OPC_FD_CNT_OP             = 0x1205,
        HCLGE_OPC_FD_USER_DEF_OP        = 0x1207,
 
        /* MDIO command */
@@ -1109,6 +1110,14 @@ struct hclge_fd_ad_config_cmd {
        u8 rsv2[8];
 };
 
+struct hclge_fd_ad_cnt_read_cmd {
+       u8 rsv0[4];
+       __le16 index;
+       u8 rsv1[2];
+       __le64 cnt;
+       u8 rsv2[8];
+};
+
 #define HCLGE_FD_USER_DEF_OFT_S                0
 #define HCLGE_FD_USER_DEF_OFT_M                GENMASK(14, 0)
 #define HCLGE_FD_USER_DEF_EN_B         15
index 6fc50d0..2887881 100644 (file)
@@ -1549,6 +1549,39 @@ out:
        return ret;
 }
 
+static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
+{
+       u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
+       struct hclge_fd_ad_cnt_read_cmd *req;
+       char str_id[HCLGE_DBG_ID_LEN];
+       struct hclge_desc desc;
+       int pos = 0;
+       int ret;
+       u64 cnt;
+       u8 i;
+
+       pos += scnprintf(buf + pos, len - pos,
+                        "func_id\thit_times\n");
+
+       for (i = 0; i < func_num; i++) {
+               hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
+               req = (struct hclge_fd_ad_cnt_read_cmd *)desc.data;
+               req->index = cpu_to_le16(i);
+               ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+               if (ret) {
+                       dev_err(&hdev->pdev->dev, "failed to get fd counter, ret = %d\n",
+                               ret);
+                       return ret;
+               }
+               cnt = le64_to_cpu(req->cnt);
+               hclge_dbg_get_func_id_str(str_id, i);
+               pos += scnprintf(buf + pos, len - pos,
+                                "%s\t%llu\n", str_id, cnt);
+       }
+
+       return 0;
+}
+
 int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
 {
        int pos = 0;
@@ -1894,6 +1927,36 @@ static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
        }
 }
 
+static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
+{
+       u8 func_num = pci_num_vf(hdev->pdev) + 1;
+       struct hclge_vport *vport;
+       int pos = 0;
+       u8 i;
+
+       pos += scnprintf(buf, len, "num_alloc_vport   : %u\n",
+                         hdev->num_alloc_vport);
+       pos += scnprintf(buf + pos, len - pos, "max_umv_size     : %u\n",
+                        hdev->max_umv_size);
+       pos += scnprintf(buf + pos, len - pos, "wanted_umv_size  : %u\n",
+                        hdev->wanted_umv_size);
+       pos += scnprintf(buf + pos, len - pos, "priv_umv_size    : %u\n",
+                        hdev->priv_umv_size);
+
+       mutex_lock(&hdev->vport_lock);
+       pos += scnprintf(buf + pos, len - pos, "share_umv_size   : %u\n",
+                        hdev->share_umv_size);
+       for (i = 0; i < func_num; i++) {
+               vport = &hdev->vport[i];
+               pos += scnprintf(buf + pos, len - pos,
+                                "vport(%u) used_umv_num : %u\n",
+                                i, vport->used_umv_num);
+       }
+       mutex_unlock(&hdev->vport_lock);
+
+       return 0;
+}
+
 static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
                                         struct hclge_dbg_vlan_cfg *vlan_cfg)
 {
@@ -2375,6 +2438,14 @@ static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
                .cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
                .dbg_dump = hclge_dbg_dump_vlan_config,
        },
+       {
+               .cmd = HNAE3_DBG_CMD_FD_COUNTER,
+               .dbg_dump = hclge_dbg_dump_fd_counter,
+       },
+       {
+               .cmd = HNAE3_DBG_CMD_UMV_INFO,
+               .dbg_dump = hclge_dbg_dump_umv_info,
+       },
 };
 
 int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
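
Both new dump helpers follow the standard scnprintf() accumulation idiom for filling a caller-provided debugfs buffer: scnprintf() returns the number of characters actually written (at most size - 1, and 0 when size is 0), so pos can never overrun len and the buffer always stays NUL-terminated. A minimal sketch of the pattern:

#include <linux/kernel.h>

/* Sketch: accumulate formatted rows into a fixed-size buffer. */
static int dump_rows(char *buf, int len)
{
	int pos = 0;
	int i;

	pos += scnprintf(buf + pos, len - pos, "row\tvalue\n");
	for (i = 0; i < 4; i++)
		pos += scnprintf(buf + pos, len - pos, "%d\t%d\n", i, i * i);

	/* pos <= len - 1, so the output is complete or cleanly truncated. */
	return pos;
}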
index bad9fda..ec9a7f8 100644 (file)
@@ -2330,8 +2330,10 @@ int hclge_handle_error_info_log(struct hnae3_ae_dev *ae_dev)
        buf_size = buf_len / sizeof(u32);
 
        desc_data = kzalloc(buf_len, GFP_KERNEL);
-       if (!desc_data)
-               return -ENOMEM;
+       if (!desc_data) {
+               ret = -ENOMEM;
+               goto err_desc;
+       }
 
        buf = kzalloc(buf_len, GFP_KERNEL);
        if (!buf) {
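
This fix, and the matching one in ice_pf_dcb_cfg() later in the series, replaces a bare return -ENOMEM with a jump to the function's existing unwind path, so allocations made before the failing kzalloc() are not leaked. The canonical kernel shape of that pattern, sketched with hypothetical resources:

#include <linux/sizes.h>
#include <linux/slab.h>

/* Sketch of goto-based unwinding for multi-step setup. */
static int setup_two_buffers(void **out_a, void **out_b)
{
	void *a, *b;
	int ret;

	a = kzalloc(SZ_4K, GFP_KERNEL);
	if (!a)
		return -ENOMEM;		/* nothing to unwind yet */

	b = kzalloc(SZ_4K, GFP_KERNEL);
	if (!b) {
		ret = -ENOMEM;
		goto err_free_a;	/* release in reverse order */
	}

	*out_a = a;
	*out_b = b;
	return 0;

err_free_a:
	kfree(a);
	return ret;
}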
index f3e482a..dd3354a 100644 (file)
@@ -6000,8 +6000,14 @@ static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
                ad_data.queue_id = rule->queue_id;
        }
 
-       ad_data.use_counter = false;
-       ad_data.counter_id = 0;
+       if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
+               ad_data.use_counter = true;
+               ad_data.counter_id = rule->vf_id %
+                                    hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
+       } else {
+               ad_data.use_counter = false;
+               ad_data.counter_id = 0;
+       }
 
        ad_data.use_next_stage = false;
        ad_data.next_input_key = 0;
index b3eb8f1..3b1f845 100644 (file)
@@ -415,8 +415,6 @@ int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
 
 static int hclge_ptp_create_clock(struct hclge_dev *hdev)
 {
-#define HCLGE_PTP_NAME_LEN     32
-
        struct hclge_ptp *ptp;
 
        ptp = devm_kzalloc(&hdev->pdev->dev, sizeof(*ptp), GFP_KERNEL);
@@ -424,7 +422,7 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
                return -ENOMEM;
 
        ptp->hdev = hdev;
-       snprintf(ptp->info.name, HCLGE_PTP_NAME_LEN, "%s",
+       snprintf(ptp->info.name, sizeof(ptp->info.name), "%s",
                 HCLGE_DRIVER_NAME);
        ptp->info.owner = THIS_MODULE;
        ptp->info.max_adj = HCLGE_PTP_CYCLE_ADJ_MAX;
index b3ca7af..5a202b7 100644 (file)
@@ -43,9 +43,9 @@
 #define HCLGE_PTP_SEC_H_OFFSET         32u
 #define HCLGE_PTP_SEC_L_MASK           GENMASK(31, 0)
 
-#define HCLGE_PTP_FLAG_EN              BIT(0)
-#define HCLGE_PTP_FLAG_TX_EN           BIT(1)
-#define HCLGE_PTP_FLAG_RX_EN           BIT(2)
+#define HCLGE_PTP_FLAG_EN              0
+#define HCLGE_PTP_FLAG_TX_EN           1
+#define HCLGE_PTP_FLAG_RX_EN           2
 
 struct hclge_ptp {
        struct hclge_dev *hdev;
index bc67a7e..737ba85 100644 (file)
@@ -1285,36 +1285,41 @@ static void ibmveth_rx_csum_helper(struct sk_buff *skb,
                iph_proto = iph6->nexthdr;
        }
 
-       /* In OVS environment, when a flow is not cached, specifically for a
-        * new TCP connection, the first packet information is passed up
+       /* When CSO is enabled the TCP checksum may have been set to zero by
+        * the sender, given that we zeroed out the TCP checksum field in the
+        * transmit path (refer to ibmveth_start_xmit). In this case set
+        * up CHECKSUM_PARTIAL. If the packet is forwarded, the checksum will
+        * then be recalculated by the destination NIC (CSO must be enabled
+        * on the destination NIC).
+        *
+        * In an OVS environment, when a flow is not cached, specifically for a
+        * new TCP connection, the first packet information is passed up to
         * the user space for finding a flow. During this process, OVS computes
         * checksum on the first packet when CHECKSUM_PARTIAL flag is set.
         *
-        * Given that we zeroed out TCP checksum field in transmit path
-        * (refer ibmveth_start_xmit routine) as we set "no checksum bit",
-        * OVS computed checksum will be incorrect w/o TCP pseudo checksum
-        * in the packet. This leads to OVS dropping the packet and hence
-        * TCP retransmissions are seen.
-        *
-        * So, re-compute TCP pseudo header checksum.
+        * So, re-compute TCP pseudo header checksum when configured for
+        * trunk mode.
         */
-       if (iph_proto == IPPROTO_TCP && adapter->is_active_trunk) {
+       if (iph_proto == IPPROTO_TCP) {
                struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
-
-               tcphdrlen = skb->len - iphlen;
-
-               /* Recompute TCP pseudo header checksum */
-               if (skb_proto == ETH_P_IP)
-                       tcph->check = ~csum_tcpudp_magic(iph->saddr,
+               if (tcph->check == 0x0000) {
+                       /* Recompute TCP pseudo header checksum  */
+                       if (adapter->is_active_trunk) {
+                               tcphdrlen = skb->len - iphlen;
+                               if (skb_proto == ETH_P_IP)
+                                       tcph->check =
+                                        ~csum_tcpudp_magic(iph->saddr,
                                        iph->daddr, tcphdrlen, iph_proto, 0);
-               else if (skb_proto == ETH_P_IPV6)
-                       tcph->check = ~csum_ipv6_magic(&iph6->saddr,
+                               else if (skb_proto == ETH_P_IPV6)
+                                       tcph->check =
+                                        ~csum_ipv6_magic(&iph6->saddr,
                                        &iph6->daddr, tcphdrlen, iph_proto, 0);
-
-               /* Setup SKB fields for checksum offload */
-               skb_partial_csum_set(skb, iphlen,
-                                    offsetof(struct tcphdr, check));
-               skb_reset_network_header(skb);
+                       }
+                       /* Setup SKB fields for checksum offload */
+                       skb_partial_csum_set(skb, iphlen,
+                                            offsetof(struct tcphdr, check));
+                       skb_reset_network_header(skb);
+               }
        }
 }
 
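
The ~csum_tcpudp_magic()/~csum_ipv6_magic() calls above seed tcph->check with the inverted pseudo-header checksum, which is exactly what CHECKSUM_PARTIAL requires: whoever finishes the checksum, hardware or the software fallback, then only has to sum the TCP header and payload starting at csum_start. A minimal sketch of the IPv4 case, assuming a linear skb whose IP header length is already known:

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/checksum.h>

/* Sketch: prime a linear IPv4/TCP skb for checksum offload. */
static void prime_tcp_csum_offload(struct sk_buff *skb, struct iphdr *iph,
				   unsigned int iphlen)
{
	struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
	unsigned int tcphdrlen = skb->len - iphlen;

	/* Pseudo-header checksum over saddr, daddr, length and protocol;
	 * stored inverted, as the final checksummer folds it back in.
	 */
	tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					 tcphdrlen, IPPROTO_TCP, 0);

	/* Record where checksumming must resume (csum_start/csum_offset). */
	skb_partial_csum_set(skb, iphlen, offsetof(struct tcphdr, check));
	skb_reset_network_header(skb);
}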
index 2d8804e..adb0d5c 100644 (file)
@@ -3909,21 +3909,21 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
        vlcd->type = 1;
        len = strlen(os_name) + 1;
        vlcd->len = cpu_to_be16(len);
-       strncpy(vlcd->name, os_name, len);
+       strscpy(vlcd->name, os_name, len);
        vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
 
        /* Type 2 - LPAR name */
        vlcd->type = 2;
        len = strlen(utsname()->nodename) + 1;
        vlcd->len = cpu_to_be16(len);
-       strncpy(vlcd->name, utsname()->nodename, len);
+       strscpy(vlcd->name, utsname()->nodename, len);
        vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
 
        /* Type 3 - device name */
        vlcd->type = 3;
        len = strlen(adapter->netdev->name) + 1;
        vlcd->len = cpu_to_be16(len);
-       strncpy(vlcd->name, adapter->netdev->name, len);
+       strscpy(vlcd->name, adapter->netdev->name, len);
 }
 
 static int send_login(struct ibmvnic_adapter *adapter)
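
strscpy() is the preferred replacement here because, unlike strncpy(), it always NUL-terminates the destination and returns the number of bytes copied, or -E2BIG when the source had to be truncated; strncpy() would silently leave an unterminated buffer in that case. Since the bound above already includes the trailing NUL (strlen() + 1), behavior is unchanged on the success path. A small illustration with a hypothetical fixed-size field:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

/* Sketch: strscpy() semantics vs strncpy() for a fixed-size field. */
static void set_name(char dst[16], const char *src)
{
	ssize_t n = strscpy(dst, src, 16);

	if (n == -E2BIG) {
		/* dst holds a truncated but NUL-terminated copy; strncpy()
		 * would have filled all 16 bytes and left no terminator.
		 */
		pr_warn("name '%s' truncated\n", dst);
	}
}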
index 67cb0b4..b4d3fed 100644 (file)
@@ -552,9 +552,9 @@ i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
  * ENDIF
  */
 
-/* macro to make the table lines short */
+/* macro to make the table lines short; use explicit indexing with [PTYPE] */
 #define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
-       {       PTYPE, \
+       [PTYPE] = { \
                1, \
                I40E_RX_PTYPE_OUTER_##OUTER_IP, \
                I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
@@ -565,16 +565,15 @@ i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
                I40E_RX_PTYPE_INNER_PROT_##I, \
                I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
 
-#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
-               { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+#define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
 
 /* shorter macros make the table fit but are terse */
 #define I40E_RX_PTYPE_NOF              I40E_RX_PTYPE_NOT_FRAG
 #define I40E_RX_PTYPE_FRG              I40E_RX_PTYPE_FRAG
 #define I40E_RX_PTYPE_INNER_PROT_TS    I40E_RX_PTYPE_INNER_PROT_TIMESYNC
 
-/* Lookup table mapping the HW PTYPE to the bit field for decoding */
-struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
+/* Lookup table mapping the 8-bit HW PTYPE to the bit field for decoding */
+struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
        /* L2 Packet types */
        I40E_PTT_UNUSED_ENTRY(0),
        I40E_PTT(1,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
@@ -780,118 +779,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
        I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
 
        /* unused entries */
-       I40E_PTT_UNUSED_ENTRY(154),
-       I40E_PTT_UNUSED_ENTRY(155),
-       I40E_PTT_UNUSED_ENTRY(156),
-       I40E_PTT_UNUSED_ENTRY(157),
-       I40E_PTT_UNUSED_ENTRY(158),
-       I40E_PTT_UNUSED_ENTRY(159),
-
-       I40E_PTT_UNUSED_ENTRY(160),
-       I40E_PTT_UNUSED_ENTRY(161),
-       I40E_PTT_UNUSED_ENTRY(162),
-       I40E_PTT_UNUSED_ENTRY(163),
-       I40E_PTT_UNUSED_ENTRY(164),
-       I40E_PTT_UNUSED_ENTRY(165),
-       I40E_PTT_UNUSED_ENTRY(166),
-       I40E_PTT_UNUSED_ENTRY(167),
-       I40E_PTT_UNUSED_ENTRY(168),
-       I40E_PTT_UNUSED_ENTRY(169),
-
-       I40E_PTT_UNUSED_ENTRY(170),
-       I40E_PTT_UNUSED_ENTRY(171),
-       I40E_PTT_UNUSED_ENTRY(172),
-       I40E_PTT_UNUSED_ENTRY(173),
-       I40E_PTT_UNUSED_ENTRY(174),
-       I40E_PTT_UNUSED_ENTRY(175),
-       I40E_PTT_UNUSED_ENTRY(176),
-       I40E_PTT_UNUSED_ENTRY(177),
-       I40E_PTT_UNUSED_ENTRY(178),
-       I40E_PTT_UNUSED_ENTRY(179),
-
-       I40E_PTT_UNUSED_ENTRY(180),
-       I40E_PTT_UNUSED_ENTRY(181),
-       I40E_PTT_UNUSED_ENTRY(182),
-       I40E_PTT_UNUSED_ENTRY(183),
-       I40E_PTT_UNUSED_ENTRY(184),
-       I40E_PTT_UNUSED_ENTRY(185),
-       I40E_PTT_UNUSED_ENTRY(186),
-       I40E_PTT_UNUSED_ENTRY(187),
-       I40E_PTT_UNUSED_ENTRY(188),
-       I40E_PTT_UNUSED_ENTRY(189),
-
-       I40E_PTT_UNUSED_ENTRY(190),
-       I40E_PTT_UNUSED_ENTRY(191),
-       I40E_PTT_UNUSED_ENTRY(192),
-       I40E_PTT_UNUSED_ENTRY(193),
-       I40E_PTT_UNUSED_ENTRY(194),
-       I40E_PTT_UNUSED_ENTRY(195),
-       I40E_PTT_UNUSED_ENTRY(196),
-       I40E_PTT_UNUSED_ENTRY(197),
-       I40E_PTT_UNUSED_ENTRY(198),
-       I40E_PTT_UNUSED_ENTRY(199),
-
-       I40E_PTT_UNUSED_ENTRY(200),
-       I40E_PTT_UNUSED_ENTRY(201),
-       I40E_PTT_UNUSED_ENTRY(202),
-       I40E_PTT_UNUSED_ENTRY(203),
-       I40E_PTT_UNUSED_ENTRY(204),
-       I40E_PTT_UNUSED_ENTRY(205),
-       I40E_PTT_UNUSED_ENTRY(206),
-       I40E_PTT_UNUSED_ENTRY(207),
-       I40E_PTT_UNUSED_ENTRY(208),
-       I40E_PTT_UNUSED_ENTRY(209),
-
-       I40E_PTT_UNUSED_ENTRY(210),
-       I40E_PTT_UNUSED_ENTRY(211),
-       I40E_PTT_UNUSED_ENTRY(212),
-       I40E_PTT_UNUSED_ENTRY(213),
-       I40E_PTT_UNUSED_ENTRY(214),
-       I40E_PTT_UNUSED_ENTRY(215),
-       I40E_PTT_UNUSED_ENTRY(216),
-       I40E_PTT_UNUSED_ENTRY(217),
-       I40E_PTT_UNUSED_ENTRY(218),
-       I40E_PTT_UNUSED_ENTRY(219),
-
-       I40E_PTT_UNUSED_ENTRY(220),
-       I40E_PTT_UNUSED_ENTRY(221),
-       I40E_PTT_UNUSED_ENTRY(222),
-       I40E_PTT_UNUSED_ENTRY(223),
-       I40E_PTT_UNUSED_ENTRY(224),
-       I40E_PTT_UNUSED_ENTRY(225),
-       I40E_PTT_UNUSED_ENTRY(226),
-       I40E_PTT_UNUSED_ENTRY(227),
-       I40E_PTT_UNUSED_ENTRY(228),
-       I40E_PTT_UNUSED_ENTRY(229),
-
-       I40E_PTT_UNUSED_ENTRY(230),
-       I40E_PTT_UNUSED_ENTRY(231),
-       I40E_PTT_UNUSED_ENTRY(232),
-       I40E_PTT_UNUSED_ENTRY(233),
-       I40E_PTT_UNUSED_ENTRY(234),
-       I40E_PTT_UNUSED_ENTRY(235),
-       I40E_PTT_UNUSED_ENTRY(236),
-       I40E_PTT_UNUSED_ENTRY(237),
-       I40E_PTT_UNUSED_ENTRY(238),
-       I40E_PTT_UNUSED_ENTRY(239),
-
-       I40E_PTT_UNUSED_ENTRY(240),
-       I40E_PTT_UNUSED_ENTRY(241),
-       I40E_PTT_UNUSED_ENTRY(242),
-       I40E_PTT_UNUSED_ENTRY(243),
-       I40E_PTT_UNUSED_ENTRY(244),
-       I40E_PTT_UNUSED_ENTRY(245),
-       I40E_PTT_UNUSED_ENTRY(246),
-       I40E_PTT_UNUSED_ENTRY(247),
-       I40E_PTT_UNUSED_ENTRY(248),
-       I40E_PTT_UNUSED_ENTRY(249),
-
-       I40E_PTT_UNUSED_ENTRY(250),
-       I40E_PTT_UNUSED_ENTRY(251),
-       I40E_PTT_UNUSED_ENTRY(252),
-       I40E_PTT_UNUSED_ENTRY(253),
-       I40E_PTT_UNUSED_ENTRY(254),
-       I40E_PTT_UNUSED_ENTRY(255)
+       [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
 };
 
 /**
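
The [154 ... 255] row uses GNU C range designators together with the per-entry [PTYPE] designators introduced above: the array index is now guaranteed to equal the hardware PTYPE, and every index not named in an initializer, or named in the range, is zero-filled, so an unknown PTYPE decodes with known == 0. A tiny standalone example of the syntax (a gcc/clang extension; the table here is hypothetical):

/* Designated initializers, including GNU range designators. */
struct decoded {
	unsigned int known:1;
	unsigned int payload:3;
};

static const struct decoded lookup[256] = {
	[1]  = { .known = 1, .payload = 2 },
	[22] = { .known = 1, .payload = 3 },
	/* Everything else is implicitly zeroed; the range below just
	 * documents the unused tail explicitly, as the driver tables do.
	 */
	[154 ... 255] = { 0 },
};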
index c81109a..36a4ca1 100644 (file)
@@ -804,7 +804,6 @@ enum i40e_rx_l2_ptype {
 };
 
 struct i40e_rx_ptype_decoded {
-       u32 ptype:8;
        u32 known:1;
        u32 outer_ip:1;
        u32 outer_ip_ver:1;
index 8547fc8..e9cc7f6 100644 (file)
@@ -522,9 +522,9 @@ enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
  * ENDIF
  */
 
-/* macro to make the table lines short */
+/* macro to make the table lines short; use explicit indexing with [PTYPE] */
 #define IAVF_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
-       {       PTYPE, \
+       [PTYPE] = { \
                1, \
                IAVF_RX_PTYPE_OUTER_##OUTER_IP, \
                IAVF_RX_PTYPE_OUTER_##OUTER_IP_VER, \
@@ -535,16 +535,15 @@ enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
                IAVF_RX_PTYPE_INNER_PROT_##I, \
                IAVF_RX_PTYPE_PAYLOAD_LAYER_##PL }
 
-#define IAVF_PTT_UNUSED_ENTRY(PTYPE) \
-               { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+#define IAVF_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
 
 /* shorter macros make the table fit but are terse */
 #define IAVF_RX_PTYPE_NOF              IAVF_RX_PTYPE_NOT_FRAG
 #define IAVF_RX_PTYPE_FRG              IAVF_RX_PTYPE_FRAG
 #define IAVF_RX_PTYPE_INNER_PROT_TS    IAVF_RX_PTYPE_INNER_PROT_TIMESYNC
 
-/* Lookup table mapping the HW PTYPE to the bit field for decoding */
-struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = {
+/* Lookup table mapping the 8-bit HW PTYPE to the bit field for decoding */
+struct iavf_rx_ptype_decoded iavf_ptype_lookup[BIT(8)] = {
        /* L2 Packet types */
        IAVF_PTT_UNUSED_ENTRY(0),
        IAVF_PTT(1,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
@@ -750,118 +749,7 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = {
        IAVF_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
 
        /* unused entries */
-       IAVF_PTT_UNUSED_ENTRY(154),
-       IAVF_PTT_UNUSED_ENTRY(155),
-       IAVF_PTT_UNUSED_ENTRY(156),
-       IAVF_PTT_UNUSED_ENTRY(157),
-       IAVF_PTT_UNUSED_ENTRY(158),
-       IAVF_PTT_UNUSED_ENTRY(159),
-
-       IAVF_PTT_UNUSED_ENTRY(160),
-       IAVF_PTT_UNUSED_ENTRY(161),
-       IAVF_PTT_UNUSED_ENTRY(162),
-       IAVF_PTT_UNUSED_ENTRY(163),
-       IAVF_PTT_UNUSED_ENTRY(164),
-       IAVF_PTT_UNUSED_ENTRY(165),
-       IAVF_PTT_UNUSED_ENTRY(166),
-       IAVF_PTT_UNUSED_ENTRY(167),
-       IAVF_PTT_UNUSED_ENTRY(168),
-       IAVF_PTT_UNUSED_ENTRY(169),
-
-       IAVF_PTT_UNUSED_ENTRY(170),
-       IAVF_PTT_UNUSED_ENTRY(171),
-       IAVF_PTT_UNUSED_ENTRY(172),
-       IAVF_PTT_UNUSED_ENTRY(173),
-       IAVF_PTT_UNUSED_ENTRY(174),
-       IAVF_PTT_UNUSED_ENTRY(175),
-       IAVF_PTT_UNUSED_ENTRY(176),
-       IAVF_PTT_UNUSED_ENTRY(177),
-       IAVF_PTT_UNUSED_ENTRY(178),
-       IAVF_PTT_UNUSED_ENTRY(179),
-
-       IAVF_PTT_UNUSED_ENTRY(180),
-       IAVF_PTT_UNUSED_ENTRY(181),
-       IAVF_PTT_UNUSED_ENTRY(182),
-       IAVF_PTT_UNUSED_ENTRY(183),
-       IAVF_PTT_UNUSED_ENTRY(184),
-       IAVF_PTT_UNUSED_ENTRY(185),
-       IAVF_PTT_UNUSED_ENTRY(186),
-       IAVF_PTT_UNUSED_ENTRY(187),
-       IAVF_PTT_UNUSED_ENTRY(188),
-       IAVF_PTT_UNUSED_ENTRY(189),
-
-       IAVF_PTT_UNUSED_ENTRY(190),
-       IAVF_PTT_UNUSED_ENTRY(191),
-       IAVF_PTT_UNUSED_ENTRY(192),
-       IAVF_PTT_UNUSED_ENTRY(193),
-       IAVF_PTT_UNUSED_ENTRY(194),
-       IAVF_PTT_UNUSED_ENTRY(195),
-       IAVF_PTT_UNUSED_ENTRY(196),
-       IAVF_PTT_UNUSED_ENTRY(197),
-       IAVF_PTT_UNUSED_ENTRY(198),
-       IAVF_PTT_UNUSED_ENTRY(199),
-
-       IAVF_PTT_UNUSED_ENTRY(200),
-       IAVF_PTT_UNUSED_ENTRY(201),
-       IAVF_PTT_UNUSED_ENTRY(202),
-       IAVF_PTT_UNUSED_ENTRY(203),
-       IAVF_PTT_UNUSED_ENTRY(204),
-       IAVF_PTT_UNUSED_ENTRY(205),
-       IAVF_PTT_UNUSED_ENTRY(206),
-       IAVF_PTT_UNUSED_ENTRY(207),
-       IAVF_PTT_UNUSED_ENTRY(208),
-       IAVF_PTT_UNUSED_ENTRY(209),
-
-       IAVF_PTT_UNUSED_ENTRY(210),
-       IAVF_PTT_UNUSED_ENTRY(211),
-       IAVF_PTT_UNUSED_ENTRY(212),
-       IAVF_PTT_UNUSED_ENTRY(213),
-       IAVF_PTT_UNUSED_ENTRY(214),
-       IAVF_PTT_UNUSED_ENTRY(215),
-       IAVF_PTT_UNUSED_ENTRY(216),
-       IAVF_PTT_UNUSED_ENTRY(217),
-       IAVF_PTT_UNUSED_ENTRY(218),
-       IAVF_PTT_UNUSED_ENTRY(219),
-
-       IAVF_PTT_UNUSED_ENTRY(220),
-       IAVF_PTT_UNUSED_ENTRY(221),
-       IAVF_PTT_UNUSED_ENTRY(222),
-       IAVF_PTT_UNUSED_ENTRY(223),
-       IAVF_PTT_UNUSED_ENTRY(224),
-       IAVF_PTT_UNUSED_ENTRY(225),
-       IAVF_PTT_UNUSED_ENTRY(226),
-       IAVF_PTT_UNUSED_ENTRY(227),
-       IAVF_PTT_UNUSED_ENTRY(228),
-       IAVF_PTT_UNUSED_ENTRY(229),
-
-       IAVF_PTT_UNUSED_ENTRY(230),
-       IAVF_PTT_UNUSED_ENTRY(231),
-       IAVF_PTT_UNUSED_ENTRY(232),
-       IAVF_PTT_UNUSED_ENTRY(233),
-       IAVF_PTT_UNUSED_ENTRY(234),
-       IAVF_PTT_UNUSED_ENTRY(235),
-       IAVF_PTT_UNUSED_ENTRY(236),
-       IAVF_PTT_UNUSED_ENTRY(237),
-       IAVF_PTT_UNUSED_ENTRY(238),
-       IAVF_PTT_UNUSED_ENTRY(239),
-
-       IAVF_PTT_UNUSED_ENTRY(240),
-       IAVF_PTT_UNUSED_ENTRY(241),
-       IAVF_PTT_UNUSED_ENTRY(242),
-       IAVF_PTT_UNUSED_ENTRY(243),
-       IAVF_PTT_UNUSED_ENTRY(244),
-       IAVF_PTT_UNUSED_ENTRY(245),
-       IAVF_PTT_UNUSED_ENTRY(246),
-       IAVF_PTT_UNUSED_ENTRY(247),
-       IAVF_PTT_UNUSED_ENTRY(248),
-       IAVF_PTT_UNUSED_ENTRY(249),
-
-       IAVF_PTT_UNUSED_ENTRY(250),
-       IAVF_PTT_UNUSED_ENTRY(251),
-       IAVF_PTT_UNUSED_ENTRY(252),
-       IAVF_PTT_UNUSED_ENTRY(253),
-       IAVF_PTT_UNUSED_ENTRY(254),
-       IAVF_PTT_UNUSED_ENTRY(255)
+       [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
 };
 
 /**
index de9fda7..9f1f523 100644 (file)
@@ -370,7 +370,6 @@ enum iavf_rx_l2_ptype {
 };
 
 struct iavf_rx_ptype_decoded {
-       u32 ptype:8;
        u32 known:1;
        u32 outer_ip:1;
        u32 outer_ip_ver:1;
index 857dc62..926cf74 100644 (file)
@@ -316,8 +316,10 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
 
        /* Notify AUX drivers about impending change to TCs */
        event = kzalloc(sizeof(*event), GFP_KERNEL);
-       if (!event)
-               return -ENOMEM;
+       if (!event) {
+               ret = -ENOMEM;
+               goto free_cfg;
+       }
 
        set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type);
        ice_send_event_to_aux(pf, event);
index 6989a76..76021d9 100644 (file)
 #define GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S                4
 #define GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M                ICE_M(0x3, 4)
 #define GLGEN_CLKSTAT_SRC                      0x000B826C
+#define GLGEN_GPIO_CTL(_i)                     (0x000880C8 + ((_i) * 4))
+#define GLGEN_GPIO_CTL_PIN_DIR_M               BIT(4)
+#define GLGEN_GPIO_CTL_PIN_FUNC_S              8
+#define GLGEN_GPIO_CTL_PIN_FUNC_M              ICE_M(0xF, 8)
 #define GLGEN_RSTAT                            0x000B8188
 #define GLGEN_RSTAT_DEVSTATE_M                 ICE_M(0x3, 0)
 #define GLGEN_RSTCTL                           0x000B8180
 #define PFINT_MBX_CTL_CAUSE_ENA_M              BIT(30)
 #define PFINT_OICR                             0x0016CA00
 #define PFINT_OICR_TSYN_TX_M                   BIT(11)
+#define PFINT_OICR_TSYN_EVNT_M                 BIT(12)
 #define PFINT_OICR_ECC_ERR_M                   BIT(16)
 #define PFINT_OICR_MAL_DETECT_M                        BIT(19)
 #define PFINT_OICR_GRST_M                      BIT(20)
 #define GLV_UPRCL(_i)                          (0x003B2000 + ((_i) * 8))
 #define GLV_UPTCL(_i)                          (0x0030A000 + ((_i) * 8))
 #define PRTRPB_RDPC                            0x000AC260
+#define GLTSYN_AUX_IN_0(_i)                    (0x000889D8 + ((_i) * 4))
+#define GLTSYN_AUX_IN_0_INT_ENA_M              BIT(4)
+#define GLTSYN_AUX_OUT_0(_i)                   (0x00088998 + ((_i) * 4))
+#define GLTSYN_AUX_OUT_0_OUT_ENA_M             BIT(0)
+#define GLTSYN_AUX_OUT_0_OUTMOD_M              ICE_M(0x3, 1)
+#define GLTSYN_CLKO_0(_i)                      (0x000889B8 + ((_i) * 4))
 #define GLTSYN_CMD                             0x00088810
 #define GLTSYN_CMD_SYNC                                0x00088814
 #define GLTSYN_ENA(_i)                         (0x00088808 + ((_i) * 4))
 #define GLTSYN_ENA_TSYN_ENA_M                  BIT(0)
+#define GLTSYN_EVNT_H_0(_i)                    (0x00088970 + ((_i) * 4))
+#define GLTSYN_EVNT_L_0(_i)                    (0x00088968 + ((_i) * 4))
 #define GLTSYN_INCVAL_H(_i)                    (0x00088920 + ((_i) * 4))
 #define GLTSYN_INCVAL_L(_i)                    (0x00088918 + ((_i) * 4))
 #define GLTSYN_SHADJ_H(_i)                     (0x00088910 + ((_i) * 4))
 #define GLTSYN_SHTIME_H(_i)                    (0x000888F0 + ((_i) * 4))
 #define GLTSYN_SHTIME_L(_i)                    (0x000888E8 + ((_i) * 4))
 #define GLTSYN_STAT(_i)                                (0x000888C0 + ((_i) * 4))
+#define GLTSYN_STAT_EVENT0_M                   BIT(0)
+#define GLTSYN_STAT_EVENT1_M                   BIT(1)
+#define GLTSYN_STAT_EVENT2_M                   BIT(2)
 #define GLTSYN_SYNC_DLAY                       0x00088818
+#define GLTSYN_TGT_H_0(_i)                     (0x00088930 + ((_i) * 4))
+#define GLTSYN_TGT_L_0(_i)                     (0x00088928 + ((_i) * 4))
 #define GLTSYN_TIME_H(_i)                      (0x000888D8 + ((_i) * 4))
 #define GLTSYN_TIME_L(_i)                      (0x000888D0 + ((_i) * 4))
 #define PFTSYN_SEM                             0x00088880
index 4238ab0..80736e0 100644 (file)
@@ -161,7 +161,6 @@ struct ice_fltr_desc {
 #define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES      0x1ULL
 
 struct ice_rx_ptype_decoded {
-       u32 ptype:10;
        u32 known:1;
        u32 outer_ip:1;
        u32 outer_ip_ver:2;
@@ -606,9 +605,32 @@ struct ice_tlan_ctx {
        u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */
 };
 
-/* macro to make the table lines short */
+/* The ice_ptype_lkup table is used to convert from the 10-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * The rows are indexed by PTYPE via designated initializers, so the index
+ * into the table is always the PTYPE itself; any row not listed is
+ * zero-initialized and decodes as unknown.
+ *
+ * Typical work flow:
+ *
+ * IF NOT ice_ptype_lkup[ptype].known
+ * THEN
+ *      Packet is unknown
+ * ELSE IF ice_ptype_lkup[ptype].outer_ip == ICE_RX_PTYPE_OUTER_IP
+ *      Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ *      Use the enum ice_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
+
+/* macro to make the table lines short; use explicit indexing with [PTYPE] */
 #define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
-       {       PTYPE, \
+       [PTYPE] = { \
                1, \
                ICE_RX_PTYPE_OUTER_##OUTER_IP, \
                ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \
@@ -619,14 +641,14 @@ struct ice_tlan_ctx {
                ICE_RX_PTYPE_INNER_PROT_##I, \
                ICE_RX_PTYPE_PAYLOAD_LAYER_##PL }
 
-#define ICE_PTT_UNUSED_ENTRY(PTYPE) { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
 
 /* shorter macros make the table fit but are terse */
 #define ICE_RX_PTYPE_NOF               ICE_RX_PTYPE_NOT_FRAG
 #define ICE_RX_PTYPE_FRG               ICE_RX_PTYPE_FRAG
 
-/* Lookup table mapping the HW PTYPE to the bit field for decoding */
-static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
+/* Lookup table mapping the 10-bit HW PTYPE to the bit field for decoding */
+static const struct ice_rx_ptype_decoded ice_ptype_lkup[BIT(10)] = {
        /* L2 Packet types */
        ICE_PTT_UNUSED_ENTRY(0),
        ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
@@ -832,118 +854,7 @@ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
        ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
 
        /* unused entries */
-       ICE_PTT_UNUSED_ENTRY(154),
-       ICE_PTT_UNUSED_ENTRY(155),
-       ICE_PTT_UNUSED_ENTRY(156),
-       ICE_PTT_UNUSED_ENTRY(157),
-       ICE_PTT_UNUSED_ENTRY(158),
-       ICE_PTT_UNUSED_ENTRY(159),
-
-       ICE_PTT_UNUSED_ENTRY(160),
-       ICE_PTT_UNUSED_ENTRY(161),
-       ICE_PTT_UNUSED_ENTRY(162),
-       ICE_PTT_UNUSED_ENTRY(163),
-       ICE_PTT_UNUSED_ENTRY(164),
-       ICE_PTT_UNUSED_ENTRY(165),
-       ICE_PTT_UNUSED_ENTRY(166),
-       ICE_PTT_UNUSED_ENTRY(167),
-       ICE_PTT_UNUSED_ENTRY(168),
-       ICE_PTT_UNUSED_ENTRY(169),
-
-       ICE_PTT_UNUSED_ENTRY(170),
-       ICE_PTT_UNUSED_ENTRY(171),
-       ICE_PTT_UNUSED_ENTRY(172),
-       ICE_PTT_UNUSED_ENTRY(173),
-       ICE_PTT_UNUSED_ENTRY(174),
-       ICE_PTT_UNUSED_ENTRY(175),
-       ICE_PTT_UNUSED_ENTRY(176),
-       ICE_PTT_UNUSED_ENTRY(177),
-       ICE_PTT_UNUSED_ENTRY(178),
-       ICE_PTT_UNUSED_ENTRY(179),
-
-       ICE_PTT_UNUSED_ENTRY(180),
-       ICE_PTT_UNUSED_ENTRY(181),
-       ICE_PTT_UNUSED_ENTRY(182),
-       ICE_PTT_UNUSED_ENTRY(183),
-       ICE_PTT_UNUSED_ENTRY(184),
-       ICE_PTT_UNUSED_ENTRY(185),
-       ICE_PTT_UNUSED_ENTRY(186),
-       ICE_PTT_UNUSED_ENTRY(187),
-       ICE_PTT_UNUSED_ENTRY(188),
-       ICE_PTT_UNUSED_ENTRY(189),
-
-       ICE_PTT_UNUSED_ENTRY(190),
-       ICE_PTT_UNUSED_ENTRY(191),
-       ICE_PTT_UNUSED_ENTRY(192),
-       ICE_PTT_UNUSED_ENTRY(193),
-       ICE_PTT_UNUSED_ENTRY(194),
-       ICE_PTT_UNUSED_ENTRY(195),
-       ICE_PTT_UNUSED_ENTRY(196),
-       ICE_PTT_UNUSED_ENTRY(197),
-       ICE_PTT_UNUSED_ENTRY(198),
-       ICE_PTT_UNUSED_ENTRY(199),
-
-       ICE_PTT_UNUSED_ENTRY(200),
-       ICE_PTT_UNUSED_ENTRY(201),
-       ICE_PTT_UNUSED_ENTRY(202),
-       ICE_PTT_UNUSED_ENTRY(203),
-       ICE_PTT_UNUSED_ENTRY(204),
-       ICE_PTT_UNUSED_ENTRY(205),
-       ICE_PTT_UNUSED_ENTRY(206),
-       ICE_PTT_UNUSED_ENTRY(207),
-       ICE_PTT_UNUSED_ENTRY(208),
-       ICE_PTT_UNUSED_ENTRY(209),
-
-       ICE_PTT_UNUSED_ENTRY(210),
-       ICE_PTT_UNUSED_ENTRY(211),
-       ICE_PTT_UNUSED_ENTRY(212),
-       ICE_PTT_UNUSED_ENTRY(213),
-       ICE_PTT_UNUSED_ENTRY(214),
-       ICE_PTT_UNUSED_ENTRY(215),
-       ICE_PTT_UNUSED_ENTRY(216),
-       ICE_PTT_UNUSED_ENTRY(217),
-       ICE_PTT_UNUSED_ENTRY(218),
-       ICE_PTT_UNUSED_ENTRY(219),
-
-       ICE_PTT_UNUSED_ENTRY(220),
-       ICE_PTT_UNUSED_ENTRY(221),
-       ICE_PTT_UNUSED_ENTRY(222),
-       ICE_PTT_UNUSED_ENTRY(223),
-       ICE_PTT_UNUSED_ENTRY(224),
-       ICE_PTT_UNUSED_ENTRY(225),
-       ICE_PTT_UNUSED_ENTRY(226),
-       ICE_PTT_UNUSED_ENTRY(227),
-       ICE_PTT_UNUSED_ENTRY(228),
-       ICE_PTT_UNUSED_ENTRY(229),
-
-       ICE_PTT_UNUSED_ENTRY(230),
-       ICE_PTT_UNUSED_ENTRY(231),
-       ICE_PTT_UNUSED_ENTRY(232),
-       ICE_PTT_UNUSED_ENTRY(233),
-       ICE_PTT_UNUSED_ENTRY(234),
-       ICE_PTT_UNUSED_ENTRY(235),
-       ICE_PTT_UNUSED_ENTRY(236),
-       ICE_PTT_UNUSED_ENTRY(237),
-       ICE_PTT_UNUSED_ENTRY(238),
-       ICE_PTT_UNUSED_ENTRY(239),
-
-       ICE_PTT_UNUSED_ENTRY(240),
-       ICE_PTT_UNUSED_ENTRY(241),
-       ICE_PTT_UNUSED_ENTRY(242),
-       ICE_PTT_UNUSED_ENTRY(243),
-       ICE_PTT_UNUSED_ENTRY(244),
-       ICE_PTT_UNUSED_ENTRY(245),
-       ICE_PTT_UNUSED_ENTRY(246),
-       ICE_PTT_UNUSED_ENTRY(247),
-       ICE_PTT_UNUSED_ENTRY(248),
-       ICE_PTT_UNUSED_ENTRY(249),
-
-       ICE_PTT_UNUSED_ENTRY(250),
-       ICE_PTT_UNUSED_ENTRY(251),
-       ICE_PTT_UNUSED_ENTRY(252),
-       ICE_PTT_UNUSED_ENTRY(253),
-       ICE_PTT_UNUSED_ENTRY(254),
-       ICE_PTT_UNUSED_ENTRY(255),
+       [154 ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
 };
 
 static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
index cb858be..dde9802 100644 (file)
@@ -1760,12 +1760,13 @@ setup_rings:
  * ice_vsi_cfg_txqs - Configure the VSI for Tx
  * @vsi: the VSI being configured
  * @rings: Tx ring array to be configured
+ * @count: number of Tx ring array elements
  *
  * Return 0 on success and a negative value on error
  * Configure the Tx VSI for operation.
  */
 static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count)
 {
        struct ice_aqc_add_tx_qgrp *qg_buf;
        u16 q_idx = 0;
@@ -1777,7 +1778,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
 
        qg_buf->num_txqs = 1;
 
-       for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+       for (q_idx = 0; q_idx < count; q_idx++) {
                err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
                if (err)
                        goto err_cfg_txqs;
@@ -1797,7 +1798,7 @@ err_cfg_txqs:
  */
 int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
 {
-       return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
+       return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
 }
 
 /**
@@ -1812,7 +1813,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
        int ret;
        int i;
 
-       ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
+       ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
        if (ret)
                return ret;
 
@@ -2052,17 +2053,18 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
  * @rst_src: reset source
  * @rel_vmvf_num: Relative ID of VF/VM
  * @rings: Tx ring array to be stopped
+ * @count: number of Tx ring array elements
  */
 static int
 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
-                     u16 rel_vmvf_num, struct ice_ring **rings)
+                     u16 rel_vmvf_num, struct ice_ring **rings, u16 count)
 {
        u16 q_idx;
 
        if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
                return -EINVAL;
 
-       for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+       for (q_idx = 0; q_idx < count; q_idx++) {
                struct ice_txq_meta txq_meta = { };
                int status;
 
@@ -2090,7 +2092,7 @@ int
 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
                          u16 rel_vmvf_num)
 {
-       return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
+       return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
 }
 
 /**
@@ -2099,7 +2101,7 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
  */
 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
 {
-       return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
+       return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
 }
 
 /**
index 5ca6c03..ef8d181 100644 (file)
 #include "ice_dcb_lib.h"
 #include "ice_dcb_nl.h"
 #include "ice_devlink.h"
+/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
+ * ice tracepoint functions. This must be done exactly once across the
+ * ice driver.
+ */
+#define CREATE_TRACE_POINTS
+#include "ice_trace.h"
 
 #define DRV_SUMMARY    "Intel(R) Ethernet Connection E800 Series Linux Driver"
 static const char ice_driver_string[] = DRV_SUMMARY;
@@ -2642,6 +2648,20 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
 }
 
 /**
+ * ice_xdp_safe_mode - XDP handler for safe mode
+ * @dev: netdevice
+ * @xdp: XDP command
+ */
+static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
+                            struct netdev_bpf *xdp)
+{
+       NL_SET_ERR_MSG_MOD(xdp->extack,
+                          "Please provide working DDP firmware package in order to use XDP\n"
+                          "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
+       return -EOPNOTSUPP;
+}
+
+/**
  * ice_xdp - implements XDP handler
  * @dev: netdevice
  * @xdp: XDP command
@@ -2797,6 +2817,18 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
                ice_ptp_process_ts(pf);
        }
 
+       if (oicr & PFINT_OICR_TSYN_EVNT_M) {
+               u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+               u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
+
+               /* Save EVENTs from GTSYN register */
+               pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
+                                                    GLTSYN_STAT_EVENT1_M |
+                                                    GLTSYN_STAT_EVENT2_M);
+               ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
+               kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
+       }
+
 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
        if (oicr & ICE_AUX_CRIT_ERR) {
                struct iidc_event *event;
@@ -5463,6 +5495,7 @@ static void ice_tx_dim_work(struct work_struct *work)
        itr = tx_profile[dim->profile_ix].itr;
        intrl = tx_profile[dim->profile_ix].intrl;
 
+       ice_trace(tx_dim_work, q_vector, dim);
        ice_write_itr(rc, itr);
        ice_write_intrl(q_vector, intrl);
 
@@ -5487,6 +5520,7 @@ static void ice_rx_dim_work(struct work_struct *work)
        itr = rx_profile[dim->profile_ix].itr;
        intrl = rx_profile[dim->profile_ix].intrl;
 
+       ice_trace(rx_dim_work, q_vector, dim);
        ice_write_itr(rc, itr);
        ice_write_intrl(q_vector, intrl);
 
@@ -7181,6 +7215,7 @@ static const struct net_device_ops ice_netdev_safe_mode_ops = {
        .ndo_change_mtu = ice_change_mtu,
        .ndo_get_stats64 = ice_get_stats64,
        .ndo_tx_timeout = ice_tx_timeout,
+       .ndo_bpf = ice_xdp_safe_mode,
 };
 
 static const struct net_device_ops ice_netdev_ops = {
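
The ice_trace() calls added to the DIM work functions above rely on the tracepoint convention noted at the top of this file: the trace header expands to declarations everywhere, and to definitions only in the single translation unit that defines CREATE_TRACE_POINTS before including it. A skeletal sketch of that convention for a hypothetical driver-local header my_trace.h; the names are illustrative, but the macros are the real include/trace machinery:

/* --- my_trace.h (driver-local trace header) ---------------------- */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM my_drv

#if !defined(_MY_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _MY_TRACE_H_

#include <linux/tracepoint.h>

TRACE_EVENT(my_drv_event,
	    TP_PROTO(int val),
	    TP_ARGS(val),
	    TP_STRUCT__entry(__field(int, val)),
	    TP_fast_assign(__entry->val = val;),
	    TP_printk("val=%d", __entry->val)
);

#endif /* _MY_TRACE_H_ */

/* The header lives next to the sources, so point define_trace.h at it. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE my_trace
#include <trace/define_trace.h>

/* --- exactly one .c file in the module --------------------------- */
#define CREATE_TRACE_POINTS	/* expand definitions, not just declarations */
#include "my_trace.h"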
index 609f433..5d5207b 100644 (file)
@@ -4,6 +4,8 @@
 #include "ice.h"
 #include "ice_lib.h"
 
+#define E810_OUT_PROP_DELAY_NS 1
+
 /**
  * ice_set_tx_tstamp - Enable or disable Tx timestamping
  * @pf: The PF pointer to search in
@@ -484,6 +486,255 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
 }
 
 /**
+ * ice_ptp_extts_work - Workqueue task function
+ * @work: external timestamp work structure
+ *
+ * Service for PTP external clock event
+ */
+static void ice_ptp_extts_work(struct kthread_work *work)
+{
+       struct ice_ptp *ptp = container_of(work, struct ice_ptp, extts_work);
+       struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
+       struct ptp_clock_event event;
+       struct ice_hw *hw = &pf->hw;
+       u8 chan, tmr_idx;
+       u32 hi, lo;
+
+       tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+       /* Event time is captured by one of the two matched registers
+        *      GLTSYN_EVNT_L: 32 LSB of sampled time event
+        *      GLTSYN_EVNT_H: 32 MSB of sampled time event
+        * Event is defined in GLTSYN_EVNT_0 register
+        */
+       for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
+               /* Check if channel is enabled */
+               if (pf->ptp.ext_ts_irq & (1 << chan)) {
+                       lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
+                       hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
+                       event.timestamp = (((u64)hi) << 32) | lo;
+                       event.type = PTP_CLOCK_EXTTS;
+                       event.index = chan;
+
+                       /* Fire event */
+                       ptp_clock_event(pf->ptp.clock, &event);
+                       pf->ptp.ext_ts_irq &= ~(1 << chan);
+               }
+       }
+}
+
+/**
+ * ice_ptp_cfg_extts - Configure EXTTS pin and channel
+ * @pf: Board private structure
+ * @ena: true to enable; false to disable
+ * @chan: GPIO channel (0-3)
+ * @gpio_pin: GPIO pin
+ * @extts_flags: request flags from the ptp_extts_request.flags
+ */
+static int
+ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
+                 unsigned int extts_flags)
+{
+       u32 func, aux_reg, gpio_reg, irq_reg;
+       struct ice_hw *hw = &pf->hw;
+       u8 tmr_idx;
+
+       if (chan > (unsigned int)pf->ptp.info.n_ext_ts)
+               return -EINVAL;
+
+       tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+       irq_reg = rd32(hw, PFINT_OICR_ENA);
+
+       if (ena) {
+               /* Enable the interrupt */
+               irq_reg |= PFINT_OICR_TSYN_EVNT_M;
+               aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
+
+#define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE    BIT(0)
+#define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE   BIT(1)
+
+               /* set event level to requested edge */
+               if (extts_flags & PTP_FALLING_EDGE)
+                       aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
+               if (extts_flags & PTP_RISING_EDGE)
+                       aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
+
+               /* Write GPIO CTL reg.
+                * 0x1 is input sampled by the EVENT register (channel)
+                * + num_in_channels * tmr_idx
+                */
+               func = 1 + chan + (tmr_idx * 3);
+               gpio_reg = ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) &
+                           GLGEN_GPIO_CTL_PIN_FUNC_M);
+               pf->ptp.ext_ts_chan |= (1 << chan);
+       } else {
+               /* clear the values we set to reset defaults */
+               aux_reg = 0;
+               gpio_reg = 0;
+               pf->ptp.ext_ts_chan &= ~(1 << chan);
+               if (!pf->ptp.ext_ts_chan)
+                       irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
+       }
+
+       wr32(hw, PFINT_OICR_ENA, irq_reg);
+       wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
+       wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);
+
+       return 0;
+}
+
+/**
+ * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
+ * @pf: Board private structure
+ * @chan: GPIO channel (0-3)
+ * @config: desired periodic clk configuration. NULL will disable channel
+ * @store: If set to true the values will be stored
+ *
+ * Configure the internal clock generator modules to generate the clock wave of
+ * specified period.
+ */
+static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
+                             struct ice_perout_channel *config, bool store)
+{
+       u64 current_time, period, start_time, phase;
+       struct ice_hw *hw = &pf->hw;
+       u32 func, val, gpio_pin;
+       u8 tmr_idx;
+
+       tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+       /* 0. Reset mode & out_en in AUX_OUT */
+       wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
+
+       /* If we're disabling the output, clear out CLKO and TGT and keep
+        * output level low
+        */
+       if (!config || !config->ena) {
+               wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0);
+               wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0);
+               wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0);
+
+               val = GLGEN_GPIO_CTL_PIN_DIR_M;
+               gpio_pin = pf->ptp.perout_channels[chan].gpio_pin;
+               wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
+
+               /* Store the value if requested */
+               if (store)
+                       memset(&pf->ptp.perout_channels[chan], 0,
+                              sizeof(struct ice_perout_channel));
+
+               return 0;
+       }
+       period = config->period;
+       start_time = config->start_time;
+       div64_u64_rem(start_time, period, &phase);
+       gpio_pin = config->gpio_pin;
+
+       /* 1. Write clkout with half of required period value */
+       if (period & 0x1) {
+               dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
+               goto err;
+       }
+
+       period >>= 1;
+
+       /* For proper operation, the GLTSYN_CLKO must be larger than clock tick
+        */
+#define MIN_PULSE 3
+       if (period <= MIN_PULSE || period > U32_MAX) {
+               dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33",
+                       MIN_PULSE * 2);
+               goto err;
+       }
+
+       wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));
+
+       /* Allow time for programming before start_time is hit */
+       current_time = ice_ptp_read_src_clk_reg(pf, NULL);
+
+       /* if start time is in the past start the timer at the nearest second
+        * maintaining phase
+        */
+       if (start_time < current_time)
+               start_time = div64_u64(current_time + NSEC_PER_MSEC - 1,
+                                      NSEC_PER_SEC) * NSEC_PER_SEC + phase;
+
+       start_time -= E810_OUT_PROP_DELAY_NS;
+
+       /* 2. Write TARGET time */
+       wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
+       wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time));
+
+       /* 3. Write AUX_OUT register */
+       val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
+       wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);
+
+       /* 4. write GPIO CTL reg */
+       func = 8 + chan + (tmr_idx * 4);
+       val = GLGEN_GPIO_CTL_PIN_DIR_M |
+             ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & GLGEN_GPIO_CTL_PIN_FUNC_M);
+       wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
+
+       /* Store the value if requested */
+       if (store) {
+               memcpy(&pf->ptp.perout_channels[chan], config,
+                      sizeof(struct ice_perout_channel));
+               pf->ptp.perout_channels[chan].start_time = phase;
+       }
+
+       return 0;
+err:
+       dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n");
+       return -EFAULT;
+}
+
+/**
+ * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC
+ * @info: the driver's PTP info structure
+ * @rq: The requested feature to change
+ * @on: Enable/disable flag
+ */
+static int
+ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
+                        struct ptp_clock_request *rq, int on)
+{
+       struct ice_pf *pf = ptp_info_to_pf(info);
+       struct ice_perout_channel clk_cfg = {0};
+       unsigned int chan;
+       u32 gpio_pin;
+       int err;
+
+       switch (rq->type) {
+       case PTP_CLK_REQ_PEROUT:
+               chan = rq->perout.index;
+               if (chan == PPS_CLK_GEN_CHAN)
+                       clk_cfg.gpio_pin = PPS_PIN_INDEX;
+               else
+                       clk_cfg.gpio_pin = chan;
+
+               clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
+                                  rq->perout.period.nsec);
+               clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
+                                      rq->perout.start.nsec);
+               clk_cfg.ena = !!on;
+
+               err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
+               break;
+       case PTP_CLK_REQ_EXTTS:
+               chan = rq->extts.index;
+               gpio_pin = chan;
+
+               err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
+                                       rq->extts.flags);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return err;
+}
+
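For reference, the path that reaches this callback starts in userspace: a PTP_PEROUT_REQUEST ioctl on the clock's character device is routed through the PTP core to ice_ptp_gpio_enable_e810(). A minimal sketch, assuming the clock is exposed as /dev/ptp0 (the device index varies per system):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/ptp_clock.h>

    int main(void)
    {
    	struct ptp_perout_request req;
    	int fd = open("/dev/ptp0", O_RDWR); /* device index is an assumption */

    	if (fd < 0) {
    		perror("open");
    		return 1;
    	}
    	memset(&req, 0, sizeof(req));
    	req.index = 0;       /* perout channel; the driver maps it to a GPIO pin */
    	req.period.sec = 1;  /* one pulse per second */
    	req.period.nsec = 0;
    	req.start.sec = 0;   /* a past start is rounded up to the next second */
    	req.start.nsec = 0;
    	if (ioctl(fd, PTP_PEROUT_REQUEST, &req)) {
    		perror("PTP_PEROUT_REQUEST");
    		return 1;
    	}
    	return 0;
    }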
+/**
  * ice_ptp_gettimex64 - Get the time of the clock
  * @info: the driver's PTP info structure
  * @ts: timespec64 structure to hold the current time value
@@ -741,6 +992,34 @@ ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
 }
 
 /**
+ * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
+ * @info: PTP clock capabilities
+ */
+static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
+{
+       info->n_per_out = E810_N_PER_OUT;
+       info->n_ext_ts = E810_N_EXT_TS;
+}
+
+/**
+ * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
+ * @pf: Board private structure
+ * @info: PTP info to fill
+ *
+ * Assign functions to the PTP capabilities structure for E810 devices.
+ * Functions which operate across all device families should be set directly
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E810
+ * devices.
+ */
+static void
+ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+       info->enable = ice_ptp_gpio_enable_e810;
+
+       ice_ptp_setup_pins_e810(info);
+}
+
+/**
  * ice_ptp_set_caps - Set PTP capabilities
  * @pf: Board private structure
  */
@@ -757,6 +1036,8 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
        info->adjfine = ice_ptp_adjfine;
        info->gettimex64 = ice_ptp_gettimex64;
        info->settime64 = ice_ptp_settime64;
+
+       ice_ptp_set_funcs_e810(pf, info);
 }
 
 /**
@@ -783,6 +1064,17 @@ static long ice_ptp_create_clock(struct ice_pf *pf)
        info = &pf->ptp.info;
        dev = ice_pf_to_dev(pf);
 
+       /* Allocate memory for kernel pins interface */
+       if (info->n_pins) {
+               info->pin_config = devm_kcalloc(dev, info->n_pins,
+                                               sizeof(*info->pin_config),
+                                               GFP_KERNEL);
+               if (!info->pin_config) {
+                       info->n_pins = 0;
+                       return -ENOMEM;
+               }
+       }
+
        /* Attempt to register the clock before enabling the hardware. */
        clock = ptp_clock_register(info, dev);
        if (IS_ERR(clock))
@@ -1203,6 +1495,7 @@ void ice_ptp_init(struct ice_pf *pf)
 
        /* Initialize work functions */
        kthread_init_delayed_work(&pf->ptp.work, ice_ptp_periodic_work);
+       kthread_init_work(&pf->ptp.extts_work, ice_ptp_extts_work);
 
        /* Allocate a kworker for handling work required for the ports
         * connected to the PTP hardware clock.
index d01507e..e1c787b 100644 (file)
@@ -9,6 +9,21 @@
 
 #include "ice_ptp_hw.h"
 
+enum ice_ptp_pin {
+       GPIO_20 = 0,
+       GPIO_21,
+       GPIO_22,
+       GPIO_23,
+       NUM_ICE_PTP_PIN
+};
+
+struct ice_perout_channel {
+       bool ena;
+       u32 gpio_pin;
+       u64 period;
+       u64 start_time;
+};
+
 /* The ice hardware captures Tx hardware timestamps in the PHY. The timestamp
  * is stored in a buffer of registers. Depending on the specific hardware,
  * this buffer might be shared across multiple PHY ports.
@@ -82,12 +97,18 @@ struct ice_ptp_port {
        struct ice_ptp_tx tx;
 };
 
+#define GLTSYN_TGT_H_IDX_MAX           4
+
 /**
  * struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK
  * @port: data for the PHY port initialization procedure
  * @work: delayed work function for periodic tasks
+ * @extts_work: work function for handling external timestamp events
  * @cached_phc_time: a cached copy of the PHC time for timestamp extension
+ * @ext_ts_chan: the external timestamp channel in use
+ * @ext_ts_irq: the external timestamp IRQ in use
  * @kworker: kwork thread for handling periodic work
+ * @perout_channels: periodic output data
  * @info: structure defining PTP hardware capabilities
  * @clock: pointer to registered PTP clock device
  * @tstamp_config: hardware timestamping configuration
@@ -95,8 +116,12 @@ struct ice_ptp_port {
 struct ice_ptp {
        struct ice_ptp_port port;
        struct kthread_delayed_work work;
+       struct kthread_work extts_work;
        u64 cached_phc_time;
+       u8 ext_ts_chan;
+       u8 ext_ts_irq;
        struct kthread_worker *kworker;
+       struct ice_perout_channel perout_channels[GLTSYN_TGT_H_IDX_MAX];
        struct ptp_clock_info info;
        struct ptp_clock *clock;
        struct hwtstamp_config tstamp_config;
@@ -115,6 +140,24 @@ struct ice_ptp {
 #define PTP_SHARED_CLK_IDX_VALID       BIT(31)
 #define ICE_PTP_TS_VALID               BIT(0)
 
+/* Per-channel register definitions */
+#define GLTSYN_AUX_OUT(_chan, _idx)    (GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8))
+#define GLTSYN_AUX_IN(_chan, _idx)     (GLTSYN_AUX_IN_0(_idx) + ((_chan) * 8))
+#define GLTSYN_CLKO(_chan, _idx)       (GLTSYN_CLKO_0(_idx) + ((_chan) * 8))
+#define GLTSYN_TGT_L(_chan, _idx)      (GLTSYN_TGT_L_0(_idx) + ((_chan) * 16))
+#define GLTSYN_TGT_H(_chan, _idx)      (GLTSYN_TGT_H_0(_idx) + ((_chan) * 16))
+#define GLTSYN_EVNT_L(_chan, _idx)     (GLTSYN_EVNT_L_0(_idx) + ((_chan) * 16))
+#define GLTSYN_EVNT_H(_chan, _idx)     (GLTSYN_EVNT_H_0(_idx) + ((_chan) * 16))
+#define GLTSYN_EVNT_H_IDX_MAX          3
+
+/* Pin definitions for PTP PPS out */
+#define PPS_CLK_GEN_CHAN               3
+#define PPS_CLK_SRC_CHAN               2
+#define PPS_PIN_INDEX                  5
+#define TIME_SYNC_PIN_INDEX            4
+#define E810_N_EXT_TS                  3
+#define E810_N_PER_OUT                 4
+
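The per-channel macros above encode the register layout: the 32-bit AUX/CLKO registers sit at 8-byte strides per channel, while the 64-bit TGT/EVNT low/high pairs use 16-byte strides. A standalone sketch with hypothetical base addresses (the real GLTSYN_*_0 macros live in ice_hw_autogen.h):

    #include <stdio.h>

    /* Hypothetical base addresses; the real values come from ice_hw_autogen.h. */
    #define GLTSYN_CLKO_0(idx)   (0x00001000 + ((idx) * 4))
    #define GLTSYN_TGT_L_0(idx)  (0x00002000 + ((idx) * 4))

    #define GLTSYN_CLKO(chan, idx)   (GLTSYN_CLKO_0(idx) + ((chan) * 8))
    #define GLTSYN_TGT_L(chan, idx)  (GLTSYN_TGT_L_0(idx) + ((chan) * 16))

    int main(void)
    {
    	int chan, idx = 0;

    	for (chan = 0; chan < 4; chan++)
    		printf("chan %d: CLKO 0x%08x TGT_L 0x%08x\n", chan,
    		       GLTSYN_CLKO(chan, idx), GLTSYN_TGT_L(chan, idx));
    	return 0;
    }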
 #if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
 struct ice_pf;
 int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
index a17e24e..9f07b66 100644 (file)
@@ -2745,8 +2745,8 @@ static enum ice_status
 ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
                           u16 vsi_handle, unsigned long *tc_bitmap)
 {
-       struct ice_sched_agg_vsi_info *agg_vsi_info;
-       struct ice_sched_agg_info *agg_info;
+       struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
+       struct ice_sched_agg_info *agg_info, *old_agg_info;
        enum ice_status status = 0;
        struct ice_hw *hw = pi->hw;
        u8 tc;
@@ -2756,6 +2756,20 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
        agg_info = ice_get_agg_info(hw, agg_id);
        if (!agg_info)
                return ICE_ERR_PARAM;
+       /* If the VSI is already part of another aggregator then update
+        * its VSI info list
+        */
+       old_agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
+       if (old_agg_info && old_agg_info != agg_info) {
+               struct ice_sched_agg_vsi_info *vtmp;
+
+               list_for_each_entry_safe(old_agg_vsi_info, vtmp,
+                                        &old_agg_info->agg_vsi_list,
+                                        list_entry)
+                       if (old_agg_vsi_info->vsi_handle == vsi_handle)
+                               break;
+       }
+
        /* check if entry already exists */
        agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
        if (!agg_vsi_info) {
@@ -2780,6 +2794,12 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
                        break;
 
                set_bit(tc, agg_vsi_info->tc_bitmap);
+               if (old_agg_vsi_info)
+                       clear_bit(tc, old_agg_vsi_info->tc_bitmap);
+       }
+       if (old_agg_vsi_info && !old_agg_vsi_info->tc_bitmap[0]) {
+               list_del(&old_agg_vsi_info->list_entry);
+               devm_kfree(ice_hw_to_dev(pi->hw), old_agg_vsi_info);
        }
        return status;
 }
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
new file mode 100644 (file)
index 0000000..9bc0b8f
--- /dev/null
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 Intel Corporation. */
+
+/* Modeled on trace-events-sample.h */
+
+/* The trace subsystem name for ice will be "ice".
+ *
+ * This file is named ice_trace.h.
+ *
+ * Since this include file's name is different from the trace
+ * subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end
+ * of this file.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ice
+
+/* See trace-events-sample.h for a detailed description of why this
+ * guard clause is different from most normal include files.
+ */
+#if !defined(_ICE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _ICE_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+/* ice_trace() macro enables shared code to refer to trace points
+ * like:
+ *
+ * trace_ice_example(args...)
+ *
+ * ... as:
+ *
+ * ice_trace(example, args...)
+ *
+ * ... to resolve to the PF version of the tracepoint without
+ * ifdefs, and to allow tracepoints to be disabled entirely at build
+ * time.
+ *
+ * Tracepoints should always be referred to in the driver via this
+ * macro.
+ *
+ * Similarly, ice_trace_enabled(trace_name) wraps references to
+ * trace_ice_<trace_name>_enabled() functions.
+ * @trace_name: name of tracepoint
+ */
+#define _ICE_TRACE_NAME(trace_name) (trace_##ice##_##trace_name)
+#define ICE_TRACE_NAME(trace_name) _ICE_TRACE_NAME(trace_name)
+
+#define ice_trace(trace_name, args...) ICE_TRACE_NAME(trace_name)(args)
+
+#define ice_trace_enabled(trace_name) ICE_TRACE_NAME(trace_name##_enabled)()
+
+/* This is for events common to the PF. Corresponding versions will be named
+ * trace_ice_*. The ice_trace() macro above will select the right tracepoint
+ * name for the driver.
+ */
+
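To make the token pasting concrete, a small standalone sketch of how these macros expand; trace_ice_example() and its _enabled() helper stand in for the functions that the tracepoint machinery generates:

    #include <stdio.h>

    /* Stand-ins for the functions generated by DEFINE_EVENT(). */
    static void trace_ice_example(int a) { printf("trace_ice_example(%d)\n", a); }
    static int trace_ice_example_enabled(void) { return 1; }

    #define _ICE_TRACE_NAME(trace_name) (trace_##ice##_##trace_name)
    #define ICE_TRACE_NAME(trace_name) _ICE_TRACE_NAME(trace_name)
    #define ice_trace(trace_name, args...) ICE_TRACE_NAME(trace_name)(args)
    #define ice_trace_enabled(trace_name) ICE_TRACE_NAME(trace_name##_enabled)()

    int main(void)
    {
    	if (ice_trace_enabled(example))  /* -> trace_ice_example_enabled() */
    		ice_trace(example, 42);  /* -> trace_ice_example(42) */
    	return 0;
    }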
+/* Begin tracepoints */
+
+/* Global tracepoints */
+
+/* Events related to DIM, q_vectors and ring containers */
+DECLARE_EVENT_CLASS(ice_rx_dim_template,
+                   TP_PROTO(struct ice_q_vector *q_vector, struct dim *dim),
+                   TP_ARGS(q_vector, dim),
+                   TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector)
+                                    __field(struct dim *, dim)
+                                    __string(devname, q_vector->rx.ring->netdev->name)),
+
+                   TP_fast_assign(__entry->q_vector = q_vector;
+                                  __entry->dim = dim;
+                                  __assign_str(devname, q_vector->rx.ring->netdev->name);),
+
+                   TP_printk("netdev: %s Rx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
+                             __get_str(devname),
+                             __entry->q_vector->rx.ring->q_index,
+                             __entry->dim->state,
+                             __entry->dim->profile_ix,
+                             __entry->dim->tune_state,
+                             __entry->dim->steps_right,
+                             __entry->dim->steps_left,
+                             __entry->dim->tired)
+);
+
+DEFINE_EVENT(ice_rx_dim_template, ice_rx_dim_work,
+            TP_PROTO(struct ice_q_vector *q_vector, struct dim *dim),
+            TP_ARGS(q_vector, dim)
+);
+
+DECLARE_EVENT_CLASS(ice_tx_dim_template,
+                   TP_PROTO(struct ice_q_vector *q_vector, struct dim *dim),
+                   TP_ARGS(q_vector, dim),
+                   TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector)
+                                    __field(struct dim *, dim)
+                                    __string(devname, q_vector->tx.ring->netdev->name)),
+
+                   TP_fast_assign(__entry->q_vector = q_vector;
+                                  __entry->dim = dim;
+                                  __assign_str(devname, q_vector->tx.ring->netdev->name);),
+
+                   TP_printk("netdev: %s Tx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
+                             __get_str(devname),
+                             __entry->q_vector->tx.ring->q_index,
+                             __entry->dim->state,
+                             __entry->dim->profile_ix,
+                             __entry->dim->tune_state,
+                             __entry->dim->steps_right,
+                             __entry->dim->steps_left,
+                             __entry->dim->tired)
+);
+
+DEFINE_EVENT(ice_tx_dim_template, ice_tx_dim_work,
+            TP_PROTO(struct ice_q_vector *q_vector, struct dim *dim),
+            TP_ARGS(q_vector, dim)
+);
+
+/* Events related to a vsi & ring */
+DECLARE_EVENT_CLASS(ice_tx_template,
+                   TP_PROTO(struct ice_ring *ring, struct ice_tx_desc *desc,
+                            struct ice_tx_buf *buf),
+
+                   TP_ARGS(ring, desc, buf),
+                   TP_STRUCT__entry(__field(void *, ring)
+                                    __field(void *, desc)
+                                    __field(void *, buf)
+                                    __string(devname, ring->netdev->name)),
+
+                   TP_fast_assign(__entry->ring = ring;
+                                  __entry->desc = desc;
+                                  __entry->buf = buf;
+                                  __assign_str(devname, ring->netdev->name);),
+
+                   TP_printk("netdev: %s ring: %pK desc: %pK buf %pK", __get_str(devname),
+                             __entry->ring, __entry->desc, __entry->buf)
+);
+
+#define DEFINE_TX_TEMPLATE_OP_EVENT(name) \
+DEFINE_EVENT(ice_tx_template, name, \
+            TP_PROTO(struct ice_ring *ring, \
+                     struct ice_tx_desc *desc, \
+                     struct ice_tx_buf *buf), \
+            TP_ARGS(ring, desc, buf))
+
+DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq);
+DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq_unmap);
+DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq_unmap_eop);
+
+DECLARE_EVENT_CLASS(ice_rx_template,
+                   TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc),
+
+                   TP_ARGS(ring, desc),
+
+                   TP_STRUCT__entry(__field(void *, ring)
+                                    __field(void *, desc)
+                                    __string(devname, ring->netdev->name)),
+
+                   TP_fast_assign(__entry->ring = ring;
+                                  __entry->desc = desc;
+                                  __assign_str(devname, ring->netdev->name);),
+
+                   TP_printk("netdev: %s ring: %pK desc: %pK", __get_str(devname),
+                             __entry->ring, __entry->desc)
+);
+DEFINE_EVENT(ice_rx_template, ice_clean_rx_irq,
+            TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc),
+            TP_ARGS(ring, desc)
+);
+
+DECLARE_EVENT_CLASS(ice_rx_indicate_template,
+                   TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc,
+                            struct sk_buff *skb),
+
+                   TP_ARGS(ring, desc, skb),
+
+                   TP_STRUCT__entry(__field(void *, ring)
+                                    __field(void *, desc)
+                                    __field(void *, skb)
+                                    __string(devname, ring->netdev->name)),
+
+                   TP_fast_assign(__entry->ring = ring;
+                                  __entry->desc = desc;
+                                  __entry->skb = skb;
+                                  __assign_str(devname, ring->netdev->name);),
+
+                   TP_printk("netdev: %s ring: %pK desc: %pK skb %pK", __get_str(devname),
+                             __entry->ring, __entry->desc, __entry->skb)
+);
+
+DEFINE_EVENT(ice_rx_indicate_template, ice_clean_rx_irq_indicate,
+            TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc,
+                     struct sk_buff *skb),
+            TP_ARGS(ring, desc, skb)
+);
+
+DECLARE_EVENT_CLASS(ice_xmit_template,
+                   TP_PROTO(struct ice_ring *ring, struct sk_buff *skb),
+
+                   TP_ARGS(ring, skb),
+
+                   TP_STRUCT__entry(__field(void *, ring)
+                                    __field(void *, skb)
+                                    __string(devname, ring->netdev->name)),
+
+                   TP_fast_assign(__entry->ring = ring;
+                                  __entry->skb = skb;
+                                  __assign_str(devname, ring->netdev->name);),
+
+                   TP_printk("netdev: %s skb: %pK ring: %pK", __get_str(devname),
+                             __entry->skb, __entry->ring)
+);
+
+#define DEFINE_XMIT_TEMPLATE_OP_EVENT(name) \
+DEFINE_EVENT(ice_xmit_template, name, \
+            TP_PROTO(struct ice_ring *ring, struct sk_buff *skb), \
+            TP_ARGS(ring, skb))
+
+DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring);
+DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring_drop);
+
+/* End tracepoints */
+
+#endif /* _ICE_TRACE_H_ */
+/* This must be outside ifdef _ICE_TRACE_H */
+
+/* This trace include file is not located in .../include/trace alongside
+ * the kernel tracepoint definitions, because we're a loadable module.
+ */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE ../../drivers/net/ethernet/intel/ice/ice_trace
+#include <trace/define_trace.h>
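For the header to have any effect, exactly one compilation unit in the module must instantiate the tracepoints by defining CREATE_TRACE_POINTS before the include. A sketch of that pattern (whether ice does this in ice_main.c specifically is an assumption here):

    /* In exactly one .c file of the module, e.g. ice_main.c (an assumption):
     * defining CREATE_TRACE_POINTS before the include makes define_trace.h
     * emit the tracepoint bodies instead of only their declarations.
     */
    #define CREATE_TRACE_POINTS
    #include "ice_trace.h"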
index dd791ca..6ee8e00 100644 (file)
@@ -10,6 +10,7 @@
 #include "ice_txrx_lib.h"
 #include "ice_lib.h"
 #include "ice.h"
+#include "ice_trace.h"
 #include "ice_dcb_lib.h"
 #include "ice_xsk.h"
 
@@ -224,6 +225,7 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
 
                smp_rmb();      /* prevent any other reads prior to eop_desc */
 
+               ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
                /* if the descriptor isn't done, no work yet to do */
                if (!(eop_desc->cmd_type_offset_bsz &
                      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
@@ -254,6 +256,7 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
 
                /* unmap remaining buffers */
                while (tx_desc != eop_desc) {
+                       ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
                        tx_buf++;
                        tx_desc++;
                        i++;
@@ -272,6 +275,7 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
                                dma_unmap_len_set(tx_buf, len, 0);
                        }
                }
+               ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);
 
                /* move us one more past the eop_desc for start of next pkt */
                tx_buf++;
@@ -1082,7 +1086,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
                u16 stat_err_bits;
                int rx_buf_pgcnt;
                u16 vlan_tag = 0;
-               u8 rx_ptype;
+               u16 rx_ptype;
 
                /* get the Rx desc from Rx ring based on 'next_to_clean' */
                rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
@@ -1102,6 +1106,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
                 */
                dma_rmb();
 
+               ice_trace(clean_rx_irq, rx_ring, rx_desc);
                if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
                        struct ice_vsi *ctrl_vsi = rx_ring->vsi;
 
@@ -1203,6 +1208,7 @@ construct_skb:
 
                ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
 
+               ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
                /* send completed skb up the stack */
                ice_receive_skb(rx_ring, skb, vlan_tag);
                skb = NULL;
@@ -2184,6 +2190,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
        unsigned int count;
        int tso, csum;
 
+       ice_trace(xmit_frame_ring, tx_ring, skb);
+
        count = ice_xmit_desc_count(skb);
        if (ice_chk_linearize(skb, count)) {
                if (__skb_linearize(skb))
@@ -2258,6 +2266,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
        return NETDEV_TX_OK;
 
 out_drop:
+       ice_trace(xmit_frame_ring_drop, tx_ring, skb);
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
 }
index 166cf25..171397d 100644 (file)
@@ -38,10 +38,23 @@ void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val)
  * ice_ptype_to_htype - get a hash type
  * @ptype: the ptype value from the descriptor
  *
- * Returns a hash type to be used by skb_set_hash
+ * Returns the appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be
+ * used by skb_set_hash, based on the PTYPE parsed by the HW Rx pipeline and
+ * carried in the Rx descriptor.
  */
-static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
+static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
 {
+       struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);
+
+       if (!decoded.known)
+               return PKT_HASH_TYPE_NONE;
+       if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4)
+               return PKT_HASH_TYPE_L4;
+       if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3)
+               return PKT_HASH_TYPE_L3;
+       if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
+               return PKT_HASH_TYPE_L2;
+
        return PKT_HASH_TYPE_NONE;
 }
 
@@ -54,7 +67,7 @@ static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
  */
 static void
 ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
-           struct sk_buff *skb, u8 rx_ptype)
+           struct sk_buff *skb, u16 rx_ptype)
 {
        struct ice_32b_rx_flex_desc_nic *nic_mdid;
        u32 hash;
@@ -81,7 +94,7 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
  */
 static void
 ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
-           union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
+           union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
 {
        struct ice_rx_ptype_decoded decoded;
        u16 rx_status0, rx_status1;
@@ -167,7 +180,7 @@ checksum_fail:
 void
 ice_process_skb_fields(struct ice_ring *rx_ring,
                       union ice_32b_rx_flex_desc *rx_desc,
-                      struct sk_buff *skb, u8 ptype)
+                      struct sk_buff *skb, u16 ptype)
 {
        ice_rx_hash(rx_ring, rx_desc, skb, ptype);
 
index 58ff58f..05ac307 100644 (file)
@@ -53,7 +53,7 @@ void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val);
 void
 ice_process_skb_fields(struct ice_ring *rx_ring,
                       union ice_32b_rx_flex_desc *rx_desc,
-                      struct sk_buff *skb, u8 ptype);
+                      struct sk_buff *skb, u16 ptype);
 void
 ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
 #endif /* !_ICE_TXRX_LIB_H_ */
index 6392e0b..2826570 100644 (file)
@@ -1689,7 +1689,6 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
                else
                        promisc_m = ICE_UCAST_PROMISC_BITS;
 
-               vsi = ice_get_vf_vsi(vf);
                if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
                        dev_err(dev, "disabling promiscuous mode failed\n");
        }
index 8a09336..5a9f61d 100644 (file)
@@ -525,7 +525,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
                struct sk_buff *skb;
                u16 stat_err_bits;
                u16 vlan_tag = 0;
-               u8 rx_ptype;
+               u16 rx_ptype;
 
                rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
 
index 27df06e..fb78f17 100644 (file)
@@ -154,6 +154,7 @@ static int xrx200_close(struct net_device *net_dev)
 
 static int xrx200_alloc_skb(struct xrx200_chan *ch)
 {
+       struct sk_buff *skb = ch->skb[ch->dma.desc];
        dma_addr_t mapping;
        int ret = 0;
 
@@ -168,6 +169,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
                                 XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
                dev_kfree_skb_any(ch->skb[ch->dma.desc]);
+               ch->skb[ch->dma.desc] = skb;
                ret = -ENOMEM;
                goto skip;
        }
@@ -198,7 +200,6 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
        ch->dma.desc %= LTQ_DESC_NUM;
 
        if (ret) {
-               ch->skb[ch->dma.desc] = skb;
                net_dev->stats.rx_dropped++;
                netdev_err(net_dev, "failed to allocate new rx buffer\n");
                return ret;
@@ -352,8 +353,8 @@ static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
        struct xrx200_chan *ch = ptr;
 
        if (napi_schedule_prep(&ch->napi)) {
-               __napi_schedule(&ch->napi);
                ltq_dma_disable_irq(&ch->dma);
+               __napi_schedule(&ch->napi);
        }
 
        ltq_dma_ack_irq(&ch->dma);
index d14762d..62a97c4 100644 (file)
@@ -17,6 +17,8 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <linux/acpi.h>
+#include <linux/acpi_mdio.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
@@ -281,7 +283,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
        struct orion_mdio_dev *dev;
        int i, ret;
 
-       type = (enum orion_mdio_bus_type)of_device_get_match_data(&pdev->dev);
+       type = (enum orion_mdio_bus_type)device_get_match_data(&pdev->dev);
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
@@ -369,7 +371,13 @@ static int orion_mdio_probe(struct platform_device *pdev)
                goto out_mdio;
        }
 
-       ret = of_mdiobus_register(bus, pdev->dev.of_node);
+       /* On platforms without DT or ACPI support, of_mdiobus_register()
+        * falls back to plain mdiobus_register().
+        */
+       if (is_acpi_node(pdev->dev.fwnode))
+               ret = acpi_mdiobus_register(bus, pdev->dev.fwnode);
+       else
+               ret = of_mdiobus_register(bus, pdev->dev.of_node);
        if (ret < 0) {
                dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
                goto out_mdio;
@@ -421,12 +429,20 @@ static const struct of_device_id orion_mdio_match[] = {
 };
 MODULE_DEVICE_TABLE(of, orion_mdio_match);
 
+static const struct acpi_device_id orion_mdio_acpi_match[] = {
+       { "MRVL0100", BUS_TYPE_SMI },
+       { "MRVL0101", BUS_TYPE_XSMI },
+       { },
+};
+MODULE_DEVICE_TABLE(acpi, orion_mdio_acpi_match);
+
 static struct platform_driver orion_mdio_driver = {
        .probe = orion_mdio_probe,
        .remove = orion_mdio_remove,
        .driver = {
                .name = "orion-mdio",
                .of_match_table = orion_mdio_match,
+               .acpi_match_table = ACPI_PTR(orion_mdio_acpi_match),
        },
 };
 
index ada4e26..361bc4f 100644 (file)
@@ -1805,18 +1805,14 @@ static void mvneta_rx_error(struct mvneta_port *pp,
 }
 
 /* Handle RX checksum offload based on the descriptor's status */
-static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
-                          struct sk_buff *skb)
+static int mvneta_rx_csum(struct mvneta_port *pp, u32 status)
 {
        if ((pp->dev->features & NETIF_F_RXCSUM) &&
            (status & MVNETA_RXD_L3_IP4) &&
-           (status & MVNETA_RXD_L4_CSUM_OK)) {
-               skb->csum = 0;
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-               return;
-       }
+           (status & MVNETA_RXD_L4_CSUM_OK))
+               return CHECKSUM_UNNECESSARY;
 
-       skb->ip_summed = CHECKSUM_NONE;
+       return CHECKSUM_NONE;
 }
 
 /* Return tx queue pointer (find last set bit) according to <cause> returned
@@ -2335,7 +2331,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
 
        skb_reserve(skb, xdp->data - xdp->data_hard_start);
        skb_put(skb, xdp->data_end - xdp->data);
-       mvneta_rx_csum(pp, desc_status, skb);
+       skb->ip_summed = mvneta_rx_csum(pp, desc_status);
 
        for (i = 0; i < num_frags; i++) {
                skb_frag_t *frag = &sinfo->frags[i];
@@ -2533,7 +2529,7 @@ err_drop_frame:
                                     rx_bytes);
 
                        skb->protocol = eth_type_trans(skb, dev);
-                       mvneta_rx_csum(pp, rx_status, skb);
+                       skb->ip_summed = mvneta_rx_csum(pp, rx_status);
                        napi_gro_receive(napi, skb);
 
                        rcvd_pkts++;
@@ -2582,8 +2578,7 @@ err_drop_frame:
                skb_put(skb, rx_bytes);
 
                skb->protocol = eth_type_trans(skb, dev);
-
-               mvneta_rx_csum(pp, rx_status, skb);
+               skb->ip_summed = mvneta_rx_csum(pp, rx_status);
 
                napi_gro_receive(napi, skb);
        }
index 4a61c90..b9fbc9f 100644 (file)
@@ -1197,9 +1197,6 @@ struct mvpp2_port {
        /* Firmware node associated to the port */
        struct fwnode_handle *fwnode;
 
-       /* Is a PHY always connected to the port */
-       bool has_phy;
-
        /* Per-port registers' base address */
        void __iomem *base;
        void __iomem *stats_base;
index c316775..3229baf 100644 (file)
@@ -3543,21 +3543,17 @@ static void mvpp2_rx_error(struct mvpp2_port *port,
 }
 
 /* Handle RX checksum offload */
-static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
-                         struct sk_buff *skb)
+static int mvpp2_rx_csum(struct mvpp2_port *port, u32 status)
 {
        if (((status & MVPP2_RXD_L3_IP4) &&
             !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
            (status & MVPP2_RXD_L3_IP6))
                if (((status & MVPP2_RXD_L4_UDP) ||
                     (status & MVPP2_RXD_L4_TCP)) &&
-                    (status & MVPP2_RXD_L4_CSUM_OK)) {
-                       skb->csum = 0;
-                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-                       return;
-               }
+                    (status & MVPP2_RXD_L4_CSUM_OK))
+                       return CHECKSUM_UNNECESSARY;
 
-       skb->ip_summed = CHECKSUM_NONE;
+       return CHECKSUM_NONE;
 }
 
 /* Allocate a new skb and add it to BM pool */
@@ -4010,7 +4006,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
 
                skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
                skb_put(skb, rx_bytes);
-               mvpp2_rx_csum(port, rx_status, skb);
+               skb->ip_summed = mvpp2_rx_csum(port, rx_status);
                skb->protocol = eth_type_trans(skb, dev);
 
                napi_gro_receive(napi, skb);
@@ -4789,9 +4785,8 @@ static int mvpp2_open(struct net_device *dev)
                goto err_cleanup_txqs;
        }
 
-       /* Phylink isn't supported yet in ACPI mode */
-       if (port->of_node) {
-               err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
+       if (port->phylink) {
+               err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0);
                if (err) {
                        netdev_err(port->dev, "could not attach PHY (%d)\n",
                                   err);
@@ -6699,6 +6694,19 @@ static void mvpp2_acpi_start(struct mvpp2_port *port)
                          SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
 }
 
+/* To ensure backward compatibility for ACPI, check whether the port
+ * firmware node carries the description required to use phylink.
+ */
+static bool mvpp2_use_acpi_compat_mode(struct fwnode_handle *port_fwnode)
+{
+       if (!is_acpi_node(port_fwnode))
+               return false;
+
+       return (!fwnode_property_present(port_fwnode, "phy-handle") &&
+               !fwnode_property_present(port_fwnode, "managed") &&
+               !fwnode_get_named_child_node(port_fwnode, "fixed-link"));
+}
+
 /* Ports initialization */
 static int mvpp2_port_probe(struct platform_device *pdev,
                            struct fwnode_handle *port_fwnode,
@@ -6774,7 +6782,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        port = netdev_priv(dev);
        port->dev = dev;
        port->fwnode = port_fwnode;
-       port->has_phy = !!of_find_property(port_node, "phy", NULL);
        port->ntxqs = ntxqs;
        port->nrxqs = nrxqs;
        port->priv = priv;
@@ -6917,8 +6924,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
        dev->dev.of_node = port_node;
 
-       /* Phylink isn't used w/ ACPI as of now */
-       if (port_node) {
+       if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
                port->phylink_config.dev = &dev->dev;
                port->phylink_config.type = PHYLINK_NETDEV;
 
@@ -6930,6 +6936,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
                }
                port->phylink = phylink;
        } else {
+               dev_warn(&pdev->dev, "Using link IRQs for port #%d; FW update required\n", port->id);
                port->phylink = NULL;
        }
 
index 7d7dfa8..770d862 100644 (file)
@@ -746,7 +746,7 @@ struct nix_aq_enq_rsp {
                struct nix_cq_ctx_s cq;
                struct nix_rsse_s   rss;
                struct nix_rx_mce_s mce;
-               u64 prof;
+               struct nix_bandprof_s prof;
        };
 };
 
index 87d7c6a..6863314 100644 (file)
@@ -123,11 +123,8 @@ static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf)
 static bool npc_is_same(struct npc_key_field *input,
                        struct npc_key_field *field)
 {
-       int ret;
-
-       ret = memcmp(&input->layer_mdata, &field->layer_mdata,
-                    sizeof(struct npc_layer_mdata));
-       return ret == 0;
+       return memcmp(&input->layer_mdata, &field->layer_mdata,
+                    sizeof(struct npc_layer_mdata)) == 0;
 }
 
 static void npc_set_layer_mdata(struct npc_mcam *mcam, enum key_fields type,
index 74b81b4..0b3e8f2 100644 (file)
@@ -708,7 +708,7 @@ err_port_stp_set:
        return err;
 }
 
-static int prestera_port_obj_attr_set(struct net_device *dev,
+static int prestera_port_obj_attr_set(struct net_device *dev, const void *ctx,
                                      const struct switchdev_attr *attr,
                                      struct netlink_ext_ack *extack)
 {
@@ -1040,7 +1040,7 @@ static int prestera_port_vlans_add(struct prestera_port *port,
                                             flag_pvid, extack);
 }
 
-static int prestera_port_obj_add(struct net_device *dev,
+static int prestera_port_obj_add(struct net_device *dev, const void *ctx,
                                 const struct switchdev_obj *obj,
                                 struct netlink_ext_ack *extack)
 {
@@ -1078,7 +1078,7 @@ static int prestera_port_vlans_del(struct prestera_port *port,
        return 0;
 }
 
-static int prestera_port_obj_del(struct net_device *dev,
+static int prestera_port_obj_del(struct net_device *dev, const void *ctx,
                                 const struct switchdev_obj *obj)
 {
        struct prestera_port *port = netdev_priv(dev);
index e967867..9b48ae4 100644 (file)
@@ -1528,6 +1528,7 @@ static int pxa168_eth_remove(struct platform_device *pdev)
        struct net_device *dev = platform_get_drvdata(pdev);
        struct pxa168_eth_private *pep = netdev_priv(dev);
 
+       cancel_work_sync(&pep->tx_timeout_task);
        if (pep->htpr) {
                dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
                                  pep->htpr, pep->htpr_dma);
@@ -1539,7 +1540,6 @@ static int pxa168_eth_remove(struct platform_device *pdev)
        clk_disable_unprepare(pep->clk);
        mdiobus_unregister(pep->smi_bus);
        mdiobus_free(pep->smi_bus);
-       cancel_work_sync(&pep->tx_timeout_task);
        unregister_netdev(dev);
        free_netdev(dev);
        return 0;
index ff6613a..b4f66eb 100644 (file)
@@ -22,5 +22,6 @@ source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
 source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
 source "drivers/net/ethernet/mellanox/mlxsw/Kconfig"
 source "drivers/net/ethernet/mellanox/mlxfw/Kconfig"
+source "drivers/net/ethernet/mellanox/mlxbf_gige/Kconfig"
 
 endif # NET_VENDOR_MELLANOX
index 79773ac..d4b5f54 100644 (file)
@@ -7,3 +7,4 @@ obj-$(CONFIG_MLX4_CORE) += mlx4/
 obj-$(CONFIG_MLX5_CORE) += mlx5/core/
 obj-$(CONFIG_MLXSW_CORE) += mlxsw/
 obj-$(CONFIG_MLXFW) += mlxfw/
+obj-$(CONFIG_MLXBF_GIGE) += mlxbf_gige/
index f6cfec8..dc4ac1a 100644 (file)
@@ -823,6 +823,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET         0xb0
 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET   0xa8
 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET  0xac
+#define QUERY_DEV_CAP_MAP_CLOCK_TO_USER 0xc1
 #define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc
 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0
 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2
@@ -841,6 +842,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
        if (mlx4_is_mfunc(dev))
                disable_unsupported_roce_caps(outbox);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAP_CLOCK_TO_USER);
+       dev_cap->map_clock_to_user = field & 0x80;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
        dev_cap->reserved_qps = 1 << (field & 0xf);
        MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
index 8f020f2..cf64e54 100644 (file)
@@ -131,6 +131,7 @@ struct mlx4_dev_cap {
        u32 health_buffer_addrs;
        struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
        bool wol_port[MLX4_MAX_PORTS + 1];
+       bool map_clock_to_user;
 };
 
 struct mlx4_func_cap {
index c326b43..00c8465 100644 (file)
@@ -498,6 +498,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                }
        }
 
+       dev->caps.map_clock_to_user  = dev_cap->map_clock_to_user;
        dev->caps.uar_page_size      = PAGE_SIZE;
        dev->caps.num_uars           = dev_cap->uar_size / PAGE_SIZE;
        dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
@@ -1948,6 +1949,11 @@ int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
        if (mlx4_is_slave(dev))
                return -EOPNOTSUPP;
 
+       if (!dev->caps.map_clock_to_user) {
+               mlx4_dbg(dev, "Map clock to user is not supported.\n");
+               return -EOPNOTSUPP;
+       }
+
        if (!params)
                return -EINVAL;
 
index d62f90a..e1a5a79 100644 (file)
@@ -12,7 +12,6 @@ config MLX5_CORE
        depends on MLXFW || !MLXFW
        depends on PTP_1588_CLOCK || !PTP_1588_CLOCK
        depends on PCI_HYPERV_INTERFACE || !PCI_HYPERV_INTERFACE
-       default n
        help
          Core driver for low level functionality of the ConnectX-4 and
          Connect-IB cards by Mellanox Technologies.
@@ -36,7 +35,6 @@ config MLX5_CORE_EN
        depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
        select PAGE_POOL
        select DIMLIB
-       default n
        help
          Ethernet support in Mellanox Technologies ConnectX-4 NIC.
 
@@ -141,7 +139,6 @@ config MLX5_CORE_EN_DCB
 config MLX5_CORE_IPOIB
        bool "Mellanox 5th generation network adapters (connectX series) IPoIB offloads support"
        depends on MLX5_CORE_EN
-       default n
        help
          MLX5 IPoIB offloads & acceleration support.
 
@@ -149,7 +146,6 @@ config MLX5_FPGA_IPSEC
        bool "Mellanox Technologies IPsec Innova support"
        depends on MLX5_CORE
        depends on MLX5_FPGA
-       default n
        help
        Build IPsec support for the Innova family of network cards by Mellanox
        Technologies. Innova network cards are comprised of a ConnectX chip
@@ -163,7 +159,6 @@ config MLX5_IPSEC
        depends on XFRM_OFFLOAD
        depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
        select MLX5_ACCEL
-       default n
        help
        Build IPsec support for the Connect-X family of network cards by Mellanox
        Technologies.
@@ -176,7 +171,6 @@ config MLX5_EN_IPSEC
        depends on XFRM_OFFLOAD
        depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
        depends on MLX5_FPGA_IPSEC || MLX5_IPSEC
-       default n
        help
          Build support for IPsec cryptography-offload acceleration in the NIC.
          Note: Support for hardware with this capability needs to be selected
@@ -189,7 +183,6 @@ config MLX5_FPGA_TLS
        depends on MLX5_CORE_EN
        depends on MLX5_FPGA
        select MLX5_EN_TLS
-       default n
        help
        Build TLS support for the Innova family of network cards by Mellanox
        Technologies. Innova network cards are comprised of a ConnectX chip
@@ -204,7 +197,6 @@ config MLX5_TLS
        depends on MLX5_CORE_EN
        select MLX5_ACCEL
        select MLX5_EN_TLS
-       default n
        help
        Build TLS support for the Connect-X family of network cards by Mellanox
        Technologies.
@@ -227,7 +219,6 @@ config MLX5_SW_STEERING
 config MLX5_SF
        bool "Mellanox Technologies subfunction device support using auxiliary device"
        depends on MLX5_CORE && MLX5_CORE_EN
-       default n
        help
        Build support for subfuction device in the NIC. A Mellanox subfunction
        device can support RDMA, netdevice and vdpa device.
index a9166cd..ceebfc2 100644 (file)
@@ -303,6 +303,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
        int ret = 0, i;
 
        mutex_lock(&mlx5_intf_mutex);
+       priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
        for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
                if (!priv->adev[i]) {
                        bool is_supported = false;
@@ -320,6 +321,16 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
                        }
                } else {
                        adev = &priv->adev[i]->adev;
+
+                       /* Note that mlx5_core_dev is bound to an auxiliary
+                        * driver here, not to a PCI driver.
+                        *
+                        * We can race with module unload during devlink
+                        * reload, but no extra lock is needed because we
+                        * hold the global mlx5_intf_mutex.
+                        */
+                       if (!adev->dev.driver)
+                               continue;
                        adrv = to_auxiliary_drv(adev->dev.driver);
 
                        if (adrv->resume)
@@ -350,6 +361,10 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
                        continue;
 
                adev = &priv->adev[i]->adev;
+               /* Auxiliary driver was unbound manually through sysfs */
+               if (!adev->dev.driver)
+                       goto skip_suspend;
+
                adrv = to_auxiliary_drv(adev->dev.driver);
 
                if (adrv->suspend) {
@@ -357,9 +372,11 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
                        continue;
                }
 
+skip_suspend:
                del_adev(&priv->adev[i]->adev);
                priv->adev[i] = NULL;
        }
+       priv->flags |= MLX5_PRIV_FLAGS_DETACH;
        mutex_unlock(&mlx5_intf_mutex);
 }
 
@@ -448,6 +465,8 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
        struct mlx5_priv *priv = &dev->priv;
 
        lockdep_assert_held(&mlx5_intf_mutex);
+       if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
+               return 0;
 
        delete_drivers(dev);
        if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
index 0dd7615..bc33eaa 100644 (file)
@@ -64,6 +64,8 @@ struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct devlink_port *port;
 
+       if (!netif_device_present(dev))
+               return NULL;
        port = mlx5e_devlink_get_dl_port(priv);
        if (port->registered)
                return port;
index d907c1a..778e229 100644 (file)
@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 // Copyright (c) 2020 Mellanox Technologies
 
-#include <linux/ptp_classify.h>
 #include "en/ptp.h"
 #include "en/txrx.h"
 #include "en/params.h"
index ab935cc..c96668b 100644 (file)
@@ -6,6 +6,7 @@
 
 #include "en.h"
 #include "en_stats.h"
+#include <linux/ptp_classify.h>
 
 struct mlx5e_ptpsq {
        struct mlx5e_txqsq       txqsq;
@@ -43,6 +44,27 @@ struct mlx5e_ptp {
        DECLARE_BITMAP(state, MLX5E_PTP_STATE_NUM_STATES);
 };
 
+static inline bool mlx5e_use_ptpsq(struct sk_buff *skb)
+{
+       struct flow_keys fk;
+
+       if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+               return false;
+
+       if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+               return false;
+
+       if (fk.basic.n_proto == htons(ETH_P_1588))
+               return true;
+
+       if (fk.basic.n_proto != htons(ETH_P_IP) &&
+           fk.basic.n_proto != htons(ETH_P_IPV6))
+               return false;
+
+       return (fk.basic.ip_proto == IPPROTO_UDP &&
+               fk.ports.dst == htons(PTP_EV_PORT));
+}
+
 int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
                   u8 lag_port, struct mlx5e_ptp **cp);
 void mlx5e_ptp_close(struct mlx5e_ptp *c);
index 7f5efc1..3c0032c 100644 (file)
@@ -76,6 +76,7 @@ static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
 }
 
 static int mlx5_esw_bridge_port_obj_add(struct net_device *dev,
+                                       const void *ctx,
                                        const struct switchdev_obj *obj,
                                        struct netlink_ext_ack *extack)
 {
@@ -107,6 +108,7 @@ static int mlx5_esw_bridge_port_obj_add(struct net_device *dev,
 }
 
 static int mlx5_esw_bridge_port_obj_del(struct net_device *dev,
+                                       const void *ctx,
                                        const struct switchdev_obj *obj)
 {
        const struct switchdev_obj_port_vlan *vlan;
@@ -136,6 +138,7 @@ static int mlx5_esw_bridge_port_obj_del(struct net_device *dev,
 }
 
 static int mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
+                                            const void *ctx,
                                             const struct switchdev_attr *attr,
                                             struct netlink_ext_ack *extack)
 {
index be0ee03..2e9bee4 100644 (file)
@@ -129,10 +129,9 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
                                                             work);
        struct mlx5e_neigh_hash_entry *nhe = update_work->nhe;
        struct neighbour *n = update_work->n;
+       struct mlx5e_encap_entry *e = NULL;
        bool neigh_connected, same_dev;
-       struct mlx5e_encap_entry *e;
        unsigned char ha[ETH_ALEN];
-       struct mlx5e_priv *priv;
        u8 nud_state, dead;
 
        rtnl_lock();
@@ -156,14 +155,12 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
        if (!same_dev)
                goto out;
 
-       list_for_each_entry(e, &nhe->encap_list, encap_list) {
-               if (!mlx5e_encap_take(e))
-                       continue;
+       /* mlx5e_get_next_init_encap() releases previous encap before returning
+        * the next one.
+        */
+       while ((e = mlx5e_get_next_init_encap(nhe, e)) != NULL)
+               mlx5e_rep_update_flows(netdev_priv(e->out_dev), e, neigh_connected, ha);
 
-               priv = netdev_priv(e->out_dev);
-               mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
-               mlx5e_encap_put(priv, e);
-       }
 out:
        rtnl_unlock();
        mlx5e_release_neigh_update_work(update_work);
index f0b98f5..059799e 100644 (file)
@@ -94,13 +94,9 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
 
        ASSERT_RTNL();
 
-       /* wait for encap to be fully initialized */
-       wait_for_completion(&e->res_ready);
-
        mutex_lock(&esw->offloads.encap_tbl_lock);
        encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
-       if (e->compl_result < 0 || (encap_connected == neigh_connected &&
-                                   ether_addr_equal(e->h_dest, ha)))
+       if (encap_connected == neigh_connected && ether_addr_equal(e->h_dest, ha))
                goto unlock;
 
        mlx5e_take_all_encap_flows(e, &flow_list);
index 0dfd51d..2e846b7 100644 (file)
@@ -255,9 +255,12 @@ static void mlx5e_take_all_route_decap_flows(struct mlx5e_route_entry *r,
                mlx5e_take_tmp_flow(flow, flow_list, 0);
 }
 
+typedef bool (match_cb)(struct mlx5e_encap_entry *);
+
 static struct mlx5e_encap_entry *
-mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
-                          struct mlx5e_encap_entry *e)
+mlx5e_get_next_matching_encap(struct mlx5e_neigh_hash_entry *nhe,
+                             struct mlx5e_encap_entry *e,
+                             match_cb match)
 {
        struct mlx5e_encap_entry *next = NULL;
 
@@ -292,7 +295,7 @@ retry:
        /* wait for encap to be fully initialized */
        wait_for_completion(&next->res_ready);
        /* continue searching if encap entry is not in valid state after completion */
-       if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
+       if (!match(next)) {
                e = next;
                goto retry;
        }
@@ -300,6 +303,30 @@ retry:
        return next;
 }
 
+static bool mlx5e_encap_valid(struct mlx5e_encap_entry *e)
+{
+       return e->flags & MLX5_ENCAP_ENTRY_VALID;
+}
+
+static struct mlx5e_encap_entry *
+mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
+                          struct mlx5e_encap_entry *e)
+{
+       return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_valid);
+}
+
+static bool mlx5e_encap_initialized(struct mlx5e_encap_entry *e)
+{
+       return e->compl_result >= 0;
+}
+
+struct mlx5e_encap_entry *
+mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
+                         struct mlx5e_encap_entry *e)
+{
+       return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_initialized);
+}
+
 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
 {
        struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
index 3d45341..7cab08a 100644 (file)
@@ -428,7 +428,6 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
        spin_lock_init(&ipsec->sadb_rx_lock);
        ida_init(&ipsec->halloc);
        ipsec->en_priv = priv;
-       ipsec->en_priv->ipsec = ipsec;
        ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) &
                               MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER);
        ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
@@ -438,6 +437,7 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
                return -ENOMEM;
        }
 
+       priv->ipsec = ipsec;
        mlx5e_accel_ipsec_fs_init(priv);
        netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
        return 0;
@@ -532,9 +532,6 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
        struct mlx5_core_dev *mdev = priv->mdev;
        struct net_device *netdev = priv->netdev;
 
-       if (!priv->ipsec)
-               return;
-
        if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
            !MLX5_CAP_ETH(mdev, swp)) {
                mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
index a97e8d2..33de8f0 100644 (file)
@@ -136,8 +136,6 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
                                struct mlx5_wqe_eth_seg *eseg, u8 mode,
                                struct xfrm_offload *xo)
 {
-       struct mlx5e_swp_spec swp_spec = {};
-
        /* Tunnel Mode:
         * SWP:      OutL3       InL3  InL4
         * Pkt: MAC  IP     ESP  IP    L4
@@ -146,23 +144,58 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
         * SWP:      OutL3       InL4
         *           InL3
         * Pkt: MAC  IP     ESP  L4
+        *
+        * Tunnel (VXLAN TCP/UDP) over Transport Mode
+        * SWP:      OutL3                   InL3  InL4
+        * Pkt: MAC  IP     ESP  UDP  VXLAN  IP    L4
         */
-       swp_spec.l3_proto = skb->protocol;
-       swp_spec.is_tun = mode == XFRM_MODE_TUNNEL;
-       if (swp_spec.is_tun) {
-               if (xo->proto == IPPROTO_IPV6) {
-                       swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
-                       swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
-               } else {
-                       swp_spec.tun_l3_proto = htons(ETH_P_IP);
-                       swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
-               }
-       } else {
-               swp_spec.tun_l3_proto = skb->protocol;
-               swp_spec.tun_l4_proto = xo->proto;
+
+       /* Shared settings */
+       eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
+       if (skb->protocol == htons(ETH_P_IPV6))
+               eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
+
+       /* Tunnel mode */
+       if (mode == XFRM_MODE_TUNNEL) {
+               eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+               eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+               if (xo->proto == IPPROTO_IPV6)
+                       eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+               if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
+                       eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+               return;
+       }
+
+       /* Transport mode */
+       if (mode != XFRM_MODE_TRANSPORT)
+               return;
+
+       if (!xo->inner_ipproto) {
+               eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
+               eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+               if (skb->protocol == htons(ETH_P_IPV6))
+                       eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+               if (xo->proto == IPPROTO_UDP)
+                       eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+               return;
+       }
+
+       /* Tunnel (VXLAN TCP/UDP) over Transport Mode */
+       switch (xo->inner_ipproto) {
+       case IPPROTO_UDP:
+               eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+               fallthrough;
+       case IPPROTO_TCP:
+               eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+               eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2;
+               if (skb->protocol == htons(ETH_P_IPV6))
+                       eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+               break;
+       default:
+               break;
        }
 
-       mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
+       return;
 }
 
 void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
index 3e80742..5120a59 100644 (file)
@@ -93,18 +93,38 @@ static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
 void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
                               struct mlx5_wqe_eth_seg *eseg);
 
-static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
-                                            netdev_features_t features)
+static inline netdev_features_t
+mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
 {
+       struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp = skb_sec_path(skb);
 
-       if (sp && sp->len) {
+       if (sp && sp->len && xo) {
                struct xfrm_state *x = sp->xvec[0];
 
-               if (x && x->xso.offload_handle)
-                       return true;
+               if (!x || !x->xso.offload_handle)
+                       goto out_disable;
+
+               if (xo->inner_ipproto) {
+                       /* Cannot support tunneled packets over IPsec tunnel
+                        * mode: checksums for three IP headers cannot be offloaded.
+                        */
+                       if (x->props.mode == XFRM_MODE_TUNNEL)
+                               goto out_disable;
+
+                       /* Only support UDP or TCP L4 checksum */
+                       if (xo->inner_ipproto != IPPROTO_UDP &&
+                           xo->inner_ipproto != IPPROTO_TCP)
+                               goto out_disable;
+               }
+
+               return features;
+
        }
-       return false;
+
+       /* Disable CSUM and GSO for software IPsec */
+out_disable:
+       return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 }
 
 #else
@@ -120,8 +140,9 @@ static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
 }
 
 static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
-static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
-                                            netdev_features_t features) { return false; }
+static inline netdev_features_t
+mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
+{ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); }
 #endif /* CONFIG_MLX5_EN_IPSEC */
 
 #endif /* __MLX5E_IPSEC_RXTX_H__ */
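mlx5e_ipsec_feature_check() now returns a feature mask instead of a bool: callers pass the candidate features through and get them back either untouched (the state is offloadable) or with checksum and GSO stripped so the stack falls back to software IPsec. The en_main.c hunk further down wires this in for IPPROTO_ESP; a minimal sketch of that composition style (function name illustrative):

	static netdev_features_t example_esp_features(struct sk_buff *skb,
						      netdev_features_t features)
	{
		/* Strips NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK when the
		 * xfrm state cannot be offloaded, per the hunk above.
		 */
		return mlx5e_ipsec_feature_check(skb, features);
	}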
index 2c0a934..9ad3459 100644 (file)
@@ -138,6 +138,7 @@ void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
        priv = netdev_priv(netdev);
        mdev = priv->mdev;
 
+       atomic64_inc(&priv_tx->sw_stats->tx_tls_del);
        mlx5e_destroy_tis(mdev, priv_tx->tisn);
        mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
        kfree(priv_tx);
index 3fd6fd6..62ecf14 100644 (file)
@@ -42,6 +42,7 @@
 
 struct mlx5e_tls_sw_stats {
        atomic64_t tx_tls_ctx;
+       atomic64_t tx_tls_del;
        atomic64_t tx_tls_drop_metadata;
        atomic64_t tx_tls_drop_resync_alloc;
        atomic64_t tx_tls_drop_no_sync_data;
index ffc84f9..56e7b2a 100644 (file)
@@ -47,6 +47,7 @@ static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
 
 static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_del) },
        { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_ctx) },
        { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_del) },
 };
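The new tx_tls_del counter follows the existing declare-by-offset pattern: an atomic64 field in the sw-stats struct plus a matching MLX5E_DECLARE_STAT entry, so a generic walker can locate each counter by name and offset. A self-contained sketch of that pattern (the example_* names are illustrative, not the mlx5e definitions):

	struct example_counter_desc {
		const char *name;
		size_t offset;
	};

	#define EXAMPLE_DECLARE_STAT(type, fld) { #fld, offsetof(type, fld) }

	static const struct example_counter_desc example_desc[] = {
		EXAMPLE_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx),
		EXAMPLE_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_del),
	};

	/* A reader loop loads each counter through its recorded offset */
	static u64 example_read(struct mlx5e_tls_sw_stats *stats,
				const struct example_counter_desc *d)
	{
		return atomic64_read((atomic64_t *)((char *)stats + d->offset));
	}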
index 5cd466e..25403af 100644 (file)
@@ -356,7 +356,7 @@ err:
 
 int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
 {
-       int err = 0;
+       int err = -ENOMEM;
        int i;
 
        if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
index 930b225..414a73d 100644 (file)
@@ -2709,8 +2709,6 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
        nch = priv->channels.params.num_channels;
        ntc = priv->channels.params.num_tc;
        num_rxqs = nch * priv->profile->rq_groups;
-       if (priv->channels.params.ptp_rx)
-               num_rxqs++;
 
        mlx5e_netdev_set_tcs(netdev, nch, ntc);
 
@@ -4333,6 +4331,11 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
                if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
                        return features;
 #endif
+               break;
+#ifdef CONFIG_MLX5_EN_IPSEC
+       case IPPROTO_ESP:
+               return mlx5e_ipsec_feature_check(skb, features);
+#endif
        }
 
 out:
@@ -4349,9 +4352,6 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
        features = vlan_features_check(skb, features);
        features = vxlan_features_check(skb, features);
 
-       if (mlx5e_ipsec_feature_check(skb, netdev, features))
-               return features;
-
        /* Validate if the tunneled packet is being offloaded by HW */
        if (skb->encapsulation &&
            (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
@@ -4826,22 +4826,15 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
        }
 
        if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
-               netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
-                                          NETIF_F_GSO_UDP_TUNNEL_CSUM;
-               netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
-                                          NETIF_F_GSO_UDP_TUNNEL_CSUM;
-               netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
-               netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
-                                        NETIF_F_GSO_UDP_TUNNEL_CSUM;
+               netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL;
+               netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+               netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL;
        }
 
        if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
-               netdev->hw_features     |= NETIF_F_GSO_GRE |
-                                          NETIF_F_GSO_GRE_CSUM;
-               netdev->hw_enc_features |= NETIF_F_GSO_GRE |
-                                          NETIF_F_GSO_GRE_CSUM;
-               netdev->gso_partial_features |= NETIF_F_GSO_GRE |
-                                               NETIF_F_GSO_GRE_CSUM;
+               netdev->hw_features     |= NETIF_F_GSO_GRE;
+               netdev->hw_enc_features |= NETIF_F_GSO_GRE;
+               netdev->gso_partial_features |= NETIF_F_GSO_GRE;
        }
 
        if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
index 2d2cc5f..bf94bcb 100644 (file)
@@ -51,6 +51,7 @@
 #include "lib/mlx5.h"
 #define CREATE_TRACE_POINTS
 #include "diag/en_rep_tracepoint.h"
+#include "en_accel/ipsec.h"
 
 #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
        max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
@@ -630,6 +631,11 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
                             struct net_device *netdev)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
+       int err;
+
+       err = mlx5e_ipsec_init(priv);
+       if (err)
+               mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);
 
        mlx5e_vxlan_set_netdev_info(priv);
        return mlx5e_init_rep(mdev, netdev);
@@ -637,6 +643,7 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
 
 static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
 {
+       mlx5e_ipsec_cleanup(priv);
 }
 
 static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
index cf4558e..629a61e 100644 (file)
@@ -846,7 +846,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
                 hash_hairpin_info(peer_id, match_prio));
        mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
 
-       params.log_data_size = 15;
+       params.log_data_size = 16;
        params.log_data_size = min_t(u8, params.log_data_size,
                                     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
        params.log_data_size = max_t(u8, params.log_data_size,
@@ -4793,7 +4793,7 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
        list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
                wait_for_completion(&hpe->res_ready);
                if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
-                       hpe->hp->pair->peer_gone = true;
+                       mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);
 
                mlx5e_hairpin_put(priv, hpe);
        }
index 721093b..f7cbeb0 100644 (file)
@@ -178,6 +178,9 @@ void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *f
 void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
 
 struct mlx5e_neigh_hash_entry;
+struct mlx5e_encap_entry *
+mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
+                         struct mlx5e_encap_entry *e);
 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
 
 void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
index 669ff58..c63d78e 100644 (file)
@@ -32,7 +32,6 @@
 
 #include <linux/tcp.h>
 #include <linux/if_vlan.h>
-#include <linux/ptp_classify.h>
 #include <net/geneve.h>
 #include <net/dsfield.h>
 #include "en.h"
@@ -67,24 +66,6 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
 }
 #endif
 
-static bool mlx5e_use_ptpsq(struct sk_buff *skb)
-{
-       struct flow_keys fk;
-
-       if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
-               return false;
-
-       if (fk.basic.n_proto == htons(ETH_P_1588))
-               return true;
-
-       if (fk.basic.n_proto != htons(ETH_P_IP) &&
-           fk.basic.n_proto != htons(ETH_P_IPV6))
-               return false;
-
-       return (fk.basic.ip_proto == IPPROTO_UDP &&
-               fk.ports.dst == htons(PTP_EV_PORT));
-}
-
 static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
@@ -145,9 +126,9 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                }
 
                ptp_channel = READ_ONCE(priv->channels.ptp);
-               if (unlikely(ptp_channel) &&
-                   test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
-                   mlx5e_use_ptpsq(skb))
+               if (unlikely(ptp_channel &&
+                            test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
+                            mlx5e_use_ptpsq(skb)))
                        return mlx5e_select_ptpsq(dev, skb);
 
                txq_ix = netdev_pick_tx(dev, skb, NULL);
index 7e5b382..6e074cc 100644 (file)
@@ -113,7 +113,7 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
 
        eqe = next_eqe_sw(eq);
        if (!eqe)
-               return 0;
+               goto out;
 
        do {
                struct mlx5_core_cq *cq;
@@ -138,6 +138,8 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
                ++eq->cons_index;
 
        } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
+
+out:
        eq_update_ci(eq, 1);
 
        if (cqn != -1)
@@ -225,9 +227,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
                ++eq->cons_index;
 
        } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
-       eq_update_ci(eq, 1);
 
 out:
+       eq_update_ci(eq, 1);
        mlx5_eq_async_int_unlock(eq_async, recovery, &flags);
 
        return unlikely(recovery) ? num_eqes : 0;
@@ -710,7 +712,7 @@ mlx5_eq_create_generic(struct mlx5_core_dev *dev,
        struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
        int err;
 
-       if (!param->affinity)
+       if (!cpumask_available(param->affinity))
                return ERR_PTR(-EINVAL);
 
        if (!eq)
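Both eq.c hunks above enforce the same invariant: eq_update_ci(eq, 1) now runs on every exit path, including an empty poll where no EQE is found, rather than only after entries were consumed. The cpumask_available() change is separate; with CONFIG_CPUMASK_OFFSTACK a cpumask_var_t is a pointer, without it an array, so a bare NULL test is not a portable validity check. A control-flow sketch of the resulting poll pattern (handle_eqe is a hypothetical stand-in for the per-EQE work, budget handling omitted):

	/* Sketch: a single exit point guarantees the CI update */
	while ((eqe = next_eqe_sw(eq))) {
		handle_eqe(eq, eqe);	/* hypothetical per-EQE handler */
		++eq->cons_index;
	}
	eq_update_ci(eq, 1);		/* runs even when the poll was empty */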
index b88705a..97e6cb6 100644 (file)
@@ -1054,6 +1054,12 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
                        goto err_vhca_mapping;
        }
 
+       /* The external controller host PF has a factory-programmed MAC.
+        * Read it from the device.
+        */
+       if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
+               mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);
+
        esw_vport_change_handle_locked(vport);
 
        esw->enabled_vports++;
index 2cd7aea..d7bf0a3 100644 (file)
@@ -1504,7 +1504,9 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
                    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
                     d1->tir_num == d2->tir_num) ||
                    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
-                    d1->ft_num == d2->ft_num))
+                    d1->ft_num == d2->ft_num) ||
+                   (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER &&
+                    d1->sampler_id == d2->sampler_id))
                        return true;
        }
 
@@ -2969,8 +2971,11 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
                return err;
 
        steering = kzalloc(sizeof(*steering), GFP_KERNEL);
-       if (!steering)
+       if (!steering) {
+               err = -ENOMEM;
                goto err;
+       }
+
        steering->dev = dev;
        dev->priv.steering = steering;
 
index 390b1d3..eb1b316 100644 (file)
@@ -1162,7 +1162,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
        err = mlx5_core_set_hca_defaults(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to set hca defaults\n");
-               goto err_sriov;
+               goto err_set_hca;
        }
 
        mlx5_vhca_event_start(dev);
@@ -1196,6 +1196,7 @@ err_ec:
        mlx5_sf_hw_table_destroy(dev);
 err_vhca:
        mlx5_vhca_event_stop(dev);
+err_set_hca:
        mlx5_cleanup_fs(dev);
 err_fs:
        mlx5_accel_tls_cleanup(dev);
index 50af84e..174f71e 100644 (file)
@@ -54,7 +54,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
        mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
        mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
        mkey->size = MLX5_GET64(mkc, mkc, len);
-       mkey->key |= mlx5_idx_to_mkey(mkey_index);
+       mkey->key = (u32)mlx5_mkey_variant(mkey->key) | mlx5_idx_to_mkey(mkey_index);
        mkey->pd = MLX5_GET(mkc, mkc, pd);
        init_waitqueue_head(&mkey->wait);
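The mkey hunk stops OR-ing the fresh index into a possibly stale key: only the low variant byte of the previous key is kept and the rest is rebuilt from the new index. Going by the mlx5 helper names, the layout is an 8-bit variant in bits 7:0 with the mkey index above it; a sketch of that rebuild (assumed layout, for illustration only):

	static u32 example_build_mkey(u32 old_key, u32 mkey_index)
	{
		u32 variant = old_key & 0xff;	      /* mlx5_mkey_variant() */

		return (mkey_index << 8) | variant;   /* mlx5_idx_to_mkey() */
	}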
 
index 27de8da..b25f764 100644 (file)
@@ -479,7 +479,7 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pf_vec)
        if (!mlx5_sf_max_functions(dev))
                return 0;
        if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) {
-               mlx5_core_err(dev, "Not enught IRQs for SFs. SF may run at lower performance\n");
+               mlx5_core_err(dev, "Not enough IRQs for SFs. SF may run at lower performance\n");
                return 0;
        }
 
index 441b545..540cf05 100644 (file)
@@ -156,6 +156,9 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
 {
        int err;
 
+       if (!MLX5_CAP_GEN(dev, roce))
+               return;
+
        err = mlx5_nic_vport_enable_roce(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
index 6a0c6f9..fa0288a 100644 (file)
@@ -163,6 +163,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
        sf_index = event->function_id - base_id;
        sf_dev = xa_load(&table->devices, sf_index);
        switch (event->new_vhca_state) {
+       case MLX5_VHCA_STATE_INVALID:
        case MLX5_VHCA_STATE_ALLOCATED:
                if (sf_dev)
                        mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
index 500c71f..d9c6912 100644 (file)
@@ -73,26 +73,29 @@ static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 control
                                     u32 usr_sfnum)
 {
        struct mlx5_sf_hwc_table *hwc;
+       int free_idx = -1;
        int i;
 
        hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
        if (!hwc->sfs)
                return -ENOSPC;
 
-       /* Check if sf with same sfnum already exists or not. */
        for (i = 0; i < hwc->max_fn; i++) {
+               if (!hwc->sfs[i].allocated && free_idx == -1) {
+                       free_idx = i;
+                       continue;
+               }
+
                if (hwc->sfs[i].allocated && hwc->sfs[i].usr_sfnum == usr_sfnum)
                        return -EEXIST;
        }
-       /* Find the free entry and allocate the entry from the array */
-       for (i = 0; i < hwc->max_fn; i++) {
-               if (!hwc->sfs[i].allocated) {
-                       hwc->sfs[i].usr_sfnum = usr_sfnum;
-                       hwc->sfs[i].allocated = true;
-                       return i;
-               }
-       }
-       return -ENOSPC;
+
+       if (free_idx == -1)
+               return -ENOSPC;
+
+       hwc->sfs[free_idx].usr_sfnum = usr_sfnum;
+       hwc->sfs[free_idx].allocated = true;
+       return free_idx;
 }
 
 static void mlx5_sf_hw_table_id_free(struct mlx5_sf_hw_table *table, u32 controller, int id)
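The rewritten allocator above does one pass instead of two: it remembers the first free slot while still checking every allocated entry for a duplicate user SF number, so a duplicate is reported even when it sits after a free slot. The same pattern in self-contained form (the types are illustrative):

	struct example_slot {
		bool allocated;
		u32 usr_num;
	};

	static int example_id_alloc(struct example_slot *arr, int n, u32 usr_num)
	{
		int free_idx = -1;
		int i;

		for (i = 0; i < n; i++) {
			if (!arr[i].allocated) {
				if (free_idx == -1)
					free_idx = i;
				continue;
			}
			if (arr[i].usr_num == usr_num)
				return -EEXIST;	/* duplicate wins over allocation */
		}

		if (free_idx == -1)
			return -ENOSPC;

		arr[free_idx].usr_num = usr_num;
		arr[free_idx].allocated = true;
		return free_idx;
	}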
index de68c0e..6475ba3 100644 (file)
@@ -31,6 +31,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_TAG]             = DR_ACTION_STATE_NON_TERM,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
                        [DR_ACTION_TYP_TNL_L2_TO_L2]    = DR_ACTION_STATE_DECAP,
@@ -45,6 +46,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_TAG]             = DR_ACTION_STATE_DECAP,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_DECAP,
                        [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
@@ -57,6 +59,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_TAG]             = DR_ACTION_STATE_ENCAP,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_ENCAP,
                },
@@ -64,6 +67,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_TAG]             = DR_ACTION_STATE_MODIFY_HDR,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_MODIFY_HDR,
                        [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
@@ -74,6 +78,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_TAG]             = DR_ACTION_STATE_MODIFY_VLAN,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_MODIFY_VLAN,
                        [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_MODIFY_VLAN,
@@ -86,6 +91,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_TAG]             = DR_ACTION_STATE_NON_TERM,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
                        [DR_ACTION_TYP_TNL_L2_TO_L2]    = DR_ACTION_STATE_DECAP,
@@ -104,6 +110,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
                [DR_ACTION_STATE_NO_ACTION] = {
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
                        [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
                        [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
@@ -114,11 +121,13 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
                [DR_ACTION_STATE_ENCAP] = {
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_ENCAP,
                },
                [DR_ACTION_STATE_MODIFY_HDR] = {
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_MODIFY_HDR,
                        [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
                        [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
@@ -128,6 +137,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
                [DR_ACTION_STATE_MODIFY_VLAN] = {
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_MODIFY_VLAN,
                        [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_MODIFY_VLAN,
                        [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
@@ -137,6 +147,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
                [DR_ACTION_STATE_NON_TERM] = {
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
                        [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
                        [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
@@ -152,6 +163,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
                [DR_ACTION_STATE_NO_ACTION] = {
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
                        [DR_ACTION_TYP_TNL_L2_TO_L2]    = DR_ACTION_STATE_DECAP,
                        [DR_ACTION_TYP_TNL_L3_TO_L2]    = DR_ACTION_STATE_DECAP,
@@ -166,6 +178,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
                        [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_MODIFY_VLAN,
                        [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
@@ -178,11 +191,13 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
                        [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_ENCAP,
                },
                [DR_ACTION_STATE_MODIFY_HDR] = {
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_MODIFY_HDR,
                        [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
@@ -192,6 +207,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
                [DR_ACTION_STATE_MODIFY_VLAN] = {
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_MODIFY_VLAN,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_MODIFY_VLAN,
                        [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
@@ -203,6 +219,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
                [DR_ACTION_STATE_NON_TERM] = {
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
                        [DR_ACTION_TYP_TNL_L2_TO_L2]    = DR_ACTION_STATE_DECAP,
                        [DR_ACTION_TYP_TNL_L3_TO_L2]    = DR_ACTION_STATE_DECAP,
@@ -221,6 +238,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
                [DR_ACTION_STATE_NO_ACTION] = {
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
                        [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
                        [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
@@ -233,11 +251,13 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
                },
                [DR_ACTION_STATE_MODIFY_HDR] = {
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_MODIFY_HDR,
                        [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
                        [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
@@ -248,6 +268,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
                [DR_ACTION_STATE_MODIFY_VLAN] = {
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_MODIFY_VLAN,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_MODIFY_VLAN,
                        [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
@@ -258,6 +279,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
                [DR_ACTION_STATE_NON_TERM] = {
                        [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
                        [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
                        [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
                        [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
@@ -519,6 +541,10 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
                        attr.reformat.size = action->reformat->size;
                        attr.reformat.id = action->reformat->id;
                        break;
+               case DR_ACTION_TYP_SAMPLER:
+                       attr.final_icm_addr = rx_rule ? action->sampler->rx_icm_addr :
+                                                       action->sampler->tx_icm_addr;
+                       break;
                case DR_ACTION_TYP_VPORT:
                        attr.hit_gvmi = action->vport->caps->vhca_gvmi;
                        dest_action = action;
@@ -612,6 +638,7 @@ static unsigned int action_size[DR_ACTION_TYP_MAX] = {
        [DR_ACTION_TYP_VPORT]        = sizeof(struct mlx5dr_action_vport),
        [DR_ACTION_TYP_PUSH_VLAN]    = sizeof(struct mlx5dr_action_push_vlan),
        [DR_ACTION_TYP_INSERT_HDR]   = sizeof(struct mlx5dr_action_reformat),
+       [DR_ACTION_TYP_SAMPLER]      = sizeof(struct mlx5dr_action_sampler),
 };
 
 static struct mlx5dr_action *
@@ -824,6 +851,31 @@ struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value)
        return action;
 }
 
+struct mlx5dr_action *
+mlx5dr_action_create_flow_sampler(struct mlx5dr_domain *dmn, u32 sampler_id)
+{
+       struct mlx5dr_action *action;
+       u64 icm_rx, icm_tx;
+       int ret;
+
+       ret = mlx5dr_cmd_query_flow_sampler(dmn->mdev, sampler_id,
+                                           &icm_rx, &icm_tx);
+       if (ret)
+               return NULL;
+
+       action = dr_action_create_generic(DR_ACTION_TYP_SAMPLER);
+       if (!action)
+               return NULL;
+
+       action->sampler->dmn = dmn;
+       action->sampler->sampler_id = sampler_id;
+       action->sampler->rx_icm_addr = icm_rx;
+       action->sampler->tx_icm_addr = icm_tx;
+
+       refcount_inc(&dmn->refcount);
+       return action;
+}
+
 static int
 dr_action_verify_reformat_params(enum mlx5dr_action_type reformat_type,
                                 struct mlx5dr_domain *dmn,
@@ -1624,6 +1676,9 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
                kfree(action->rewrite->data);
                refcount_dec(&action->rewrite->dmn->refcount);
                break;
+       case DR_ACTION_TYP_SAMPLER:
+               refcount_dec(&action->sampler->dmn->refcount);
+               break;
        default:
                break;
        }
index 6314f50..54e1f54 100644 (file)
@@ -228,6 +228,36 @@ int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
        return 0;
 }
 
+int mlx5dr_cmd_query_flow_sampler(struct mlx5_core_dev *dev,
+                                 u32 sampler_id,
+                                 u64 *rx_icm_addr,
+                                 u64 *tx_icm_addr)
+{
+       u32 out[MLX5_ST_SZ_DW(query_sampler_obj_out)] = {};
+       u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+       void *attr;
+       int ret;
+
+       MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+                MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+                MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
+       MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sampler_id);
+
+       ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+       if (ret)
+               return ret;
+
+       attr = MLX5_ADDR_OF(query_sampler_obj_out, out, sampler_object);
+
+       *rx_icm_addr = MLX5_GET64(sampler_obj, attr,
+                                 sw_steering_icm_address_rx);
+       *tx_icm_addr = MLX5_GET64(sampler_obj, attr,
+                                 sw_steering_icm_address_tx);
+
+       return 0;
+}
+
 int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
 {
        u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};
@@ -711,6 +741,9 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
                                                 fte->dest_arr[i].vport.reformat_id);
                                }
                                break;
+                       case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+                               id = fte->dest_arr[i].sampler_id;
+                               break;
                        default:
                                id = fte->dest_arr[i].tir_num;
                        }
index 42668de..4aaca8e 100644 (file)
@@ -783,7 +783,11 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
        if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
                return -EINVAL;
 
-       memcpy(padded_data, data, data_sz);
+       inline_data_sz =
+               MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
+
+       /* Add alignment padding */
+       memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
 
        /* Remove L2L3 outer headers */
        MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
@@ -795,32 +799,34 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
        hw_action += DR_STE_ACTION_DOUBLE_SZ;
        used_actions++; /* Remove and NOP are a single double action */
 
-       inline_data_sz =
-               MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
+       /* Point to the last dword of the header */
+       data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
 
-       /* Add the new header inline + 2 extra bytes */
+       /* Add the new header with the inline action, 4 bytes at a time.
+        * The header is written in reverse order to the start of the
+        * packet so the HW parses it correctly. Since the header is 14B
+        * or 18B, two extra pad bytes are inserted and removed afterwards.
+        */
        for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
                void *addr_inline;
 
                MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
                         DR_STE_V1_ACTION_ID_INSERT_INLINE);
                /* The hardware expects here offset to words (2 bytes) */
-               MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset,
-                        i * 2);
+               MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, 0);
 
                /* Copy bytes one by one to avoid endianness problem */
                addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
                                           hw_action, inline_data);
-               memcpy(addr_inline, data_ptr, inline_data_sz);
+               memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
                hw_action += DR_STE_ACTION_DOUBLE_SZ;
-               data_ptr += inline_data_sz;
                used_actions++;
        }
 
-       /* Remove 2 extra bytes */
+       /* Remove first 2 extra bytes */
        MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
                 DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
-       MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, data_sz / 2);
+       MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, 0);
        /* The hardware expects here size in words (2 bytes) */
        MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
        used_actions++;
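A worked example makes the new padding and copy order concrete, assuming data_ptr starts at padded_data as the surrounding function sets up:

	/* Worked example (14B header, 4B inline chunks):
	 *   - staging: memcpy(padded_data + 14 % 4, data, 14) places the
	 *     header at offset 2, so bytes 0-1 are zero padding;
	 *   - data_ptr += (14 / 4) * 4 = 12, the last full chunk;
	 *   - the loop runs 14/4 + 1 = 4 times, copying buffer chunks at
	 *     offsets 12, 8, 4, 0, each inserted at packet offset 0, so
	 *     the packet ends up starting with the two pad bytes;
	 *   - the final remove-by-size action strips those 2 bytes
	 *     (remove_size = 1, counted in 2-byte words).
	 */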
index 60b8c04..f5e93fa 100644 (file)
@@ -124,6 +124,7 @@ enum mlx5dr_action_type {
        DR_ACTION_TYP_POP_VLAN,
        DR_ACTION_TYP_PUSH_VLAN,
        DR_ACTION_TYP_INSERT_HDR,
+       DR_ACTION_TYP_SAMPLER,
        DR_ACTION_TYP_MAX,
 };
 
@@ -919,6 +920,13 @@ struct mlx5dr_action_reformat {
        u8 param_1;
 };
 
+struct mlx5dr_action_sampler {
+       struct mlx5dr_domain *dmn;
+       u64 rx_icm_addr;
+       u64 tx_icm_addr;
+       u32 sampler_id;
+};
+
 struct mlx5dr_action_dest_tbl {
        u8 is_fw_tbl:1;
        union {
@@ -962,6 +970,7 @@ struct mlx5dr_action {
                void *data;
                struct mlx5dr_action_rewrite *rewrite;
                struct mlx5dr_action_reformat *reformat;
+               struct mlx5dr_action_sampler *sampler;
                struct mlx5dr_action_dest_tbl *dest_tbl;
                struct mlx5dr_action_ctr *ctr;
                struct mlx5dr_action_vport *vport;
@@ -1116,6 +1125,10 @@ int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev,
                          bool other_vport, u16 vport_number, u16 *gvmi);
 int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
                              struct mlx5dr_esw_caps *caps);
+int mlx5dr_cmd_query_flow_sampler(struct mlx5_core_dev *dev,
+                                 u32 sampler_id,
+                                 u64 *rx_icm_addr,
+                                 u64 *tx_icm_addr);
 int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev);
 int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
                                        u32 table_type,
@@ -1303,6 +1316,7 @@ struct mlx5dr_cmd_flow_destination_hw_info {
                u32 ft_num;
                u32 ft_id;
                u32 counter_id;
+               u32 sampler_id;
                struct {
                        u16 num;
                        u16 vhca_id;
index 00b4c75..d5926dd 100644 (file)
@@ -387,7 +387,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
        if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                list_for_each_entry(dst, &fte->node.children, node.list) {
                        enum mlx5_flow_destination_type type = dst->dest_attr.type;
-                       u32 ft_id;
+                       u32 id;
 
                        if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
                            num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) {
@@ -425,9 +425,20 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
                                num_term_actions++;
                                break;
                        case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
-                               ft_id = dst->dest_attr.ft_num;
+                               id = dst->dest_attr.ft_num;
                                tmp_action = mlx5dr_action_create_dest_table_num(domain,
-                                                                                ft_id);
+                                                                                id);
+                               if (!tmp_action) {
+                                       err = -ENOMEM;
+                                       goto free_actions;
+                               }
+                               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+                               term_actions[num_term_actions++].dest = tmp_action;
+                               break;
+                       case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+                               id = dst->dest_attr.sampler_id;
+                               tmp_action = mlx5dr_action_create_flow_sampler(domain,
+                                                                              id);
                                if (!tmp_action) {
                                        err = -ENOMEM;
                                        goto free_actions;
index 0e2b737..bbfe101 100644 (file)
@@ -101,6 +101,9 @@ struct mlx5dr_action *mlx5dr_action_create_drop(void);
 struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value);
 
 struct mlx5dr_action *
+mlx5dr_action_create_flow_sampler(struct mlx5dr_domain *dmn, u32 sampler_id);
+
+struct mlx5dr_action *
 mlx5dr_action_create_flow_counter(u32 counter_id);
 
 struct mlx5dr_action *
@@ -127,10 +130,11 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action);
 static inline bool
 mlx5dr_is_supported(struct mlx5_core_dev *dev)
 {
-       return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
-              (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
-               (MLX5_CAP_GEN(dev, steering_format_version) <=
-                MLX5_STEERING_FORMAT_CONNECTX_6DX));
+       return MLX5_CAP_GEN(dev, roce) &&
+              (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
+               (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
+                (MLX5_CAP_GEN(dev, steering_format_version) <=
+                 MLX5_STEERING_FORMAT_CONNECTX_6DX)));
 }
 
 /* buddy functions & structure */
index 01cc00a..b6931bb 100644 (file)
@@ -424,6 +424,15 @@ err_modify_sq:
        return err;
 }
 
+static void mlx5_hairpin_unpair_peer_sq(struct mlx5_hairpin *hp)
+{
+       int i;
+
+       for (i = 0; i < hp->num_channels; i++)
+               mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
+                                      MLX5_SQC_STATE_RST, 0, 0);
+}
+
 static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
 {
        int i;
@@ -432,13 +441,9 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
        for (i = 0; i < hp->num_channels; i++)
                mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY,
                                       MLX5_RQC_STATE_RST, 0, 0);
-
        /* unset peer SQs */
-       if (hp->peer_gone)
-               return;
-       for (i = 0; i < hp->num_channels; i++)
-               mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
-                                      MLX5_SQC_STATE_RST, 0, 0);
+       if (!hp->peer_gone)
+               mlx5_hairpin_unpair_peer_sq(hp);
 }
 
 struct mlx5_hairpin *
@@ -485,3 +490,16 @@ void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
        mlx5_hairpin_destroy_queues(hp);
        kfree(hp);
 }
+
+void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp)
+{
+       int i;
+
+       mlx5_hairpin_unpair_peer_sq(hp);
+
+       /* destroy peer SQ */
+       for (i = 0; i < hp->num_channels; i++)
+               mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+
+       hp->peer_gone = true;
+}
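The tc and hairpin hunks work together: when a peer VHCA dies, the new mlx5_core_hairpin_clear_dead_peer() tears the peer SQs down immediately instead of merely flagging peer_gone. The resulting call order, as the diffs read:

	/* Dead-peer teardown, per the hunks above:
	 *
	 *   mlx5e_tc_hairpin_update_dead_peer()
	 *     mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair)
	 *       mlx5_hairpin_unpair_peer_sq()        (peer SQs RDY -> RST)
	 *       mlx5_core_destroy_sq() per channel   (free peer SQs)
	 *       hp->peer_gone = true                 (later unpair skips peer)
	 */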
index 457ad42..4c1440a 100644 (file)
@@ -465,8 +465,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
        void *in;
        int err;
 
-       if (!vport)
-               return -EINVAL;
        if (!MLX5_CAP_GEN(mdev, vport_group_manager))
                return -EACCES;
 
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/Kconfig b/drivers/net/ethernet/mellanox/mlxbf_gige/Kconfig
new file mode 100644 (file)
index 0000000..4cdebaf
--- /dev/null
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+#
+# Mellanox GigE driver configuration
+#
+
+config MLXBF_GIGE
+       tristate "Mellanox Technologies BlueField Gigabit Ethernet support"
+       depends on (ARM64 && ACPI) || COMPILE_TEST
+       select PHYLIB
+       help
+         The second generation BlueField SoC from Mellanox Technologies
+         supports an out-of-band Gigabit Ethernet management port to the
+         Arm subsystem.
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/Makefile b/drivers/net/ethernet/mellanox/mlxbf_gige/Makefile
new file mode 100644 (file)
index 0000000..e57c137
--- /dev/null
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+obj-$(CONFIG_MLXBF_GIGE) += mlxbf_gige.o
+
+mlxbf_gige-y := mlxbf_gige_ethtool.o \
+               mlxbf_gige_gpio.o \
+               mlxbf_gige_intr.o \
+               mlxbf_gige_main.o \
+               mlxbf_gige_mdio.o \
+               mlxbf_gige_rx.o   \
+               mlxbf_gige_tx.o
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
new file mode 100644 (file)
index 0000000..e3509e6
--- /dev/null
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
+
+/* Header file for Gigabit Ethernet driver for Mellanox BlueField SoC
+ * - this file contains software data structures and any chip-specific
+ *   data structures (e.g. TX WQE format) that are memory resident.
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#ifndef __MLXBF_GIGE_H__
+#define __MLXBF_GIGE_H__
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/irqreturn.h>
+#include <linux/netdevice.h>
+#include <linux/irq.h>
+
+/* The silicon design supports a maximum RX ring size of
+ * 32K entries. Current testing shows that maximum is not
+ * needed, so the RX ring is capped at a practical value of
+ * 1024 entries.
+ */
+#define MLXBF_GIGE_MIN_RXQ_SZ     32
+#define MLXBF_GIGE_MAX_RXQ_SZ     1024
+#define MLXBF_GIGE_DEFAULT_RXQ_SZ 128
+
+#define MLXBF_GIGE_MIN_TXQ_SZ     4
+#define MLXBF_GIGE_MAX_TXQ_SZ     256
+#define MLXBF_GIGE_DEFAULT_TXQ_SZ 128
+
+#define MLXBF_GIGE_DEFAULT_BUF_SZ 2048
+
+#define MLXBF_GIGE_DMA_PAGE_SZ    4096
+#define MLXBF_GIGE_DMA_PAGE_SHIFT 12
+
+/* There are four individual MAC RX filters. Currently
+ * two of them are being used: one for the broadcast MAC
+ * (index 0) and one for the local MAC (index 1).
+ */
+#define MLXBF_GIGE_BCAST_MAC_FILTER_IDX 0
+#define MLXBF_GIGE_LOCAL_MAC_FILTER_IDX 1
+
+/* Define for broadcast MAC literal */
+#define BCAST_MAC_ADDR 0xFFFFFFFFFFFF
+
+/* There are three individual interrupts:
+ *   1) Errors, "OOB" interrupt line
+ *   2) Receive Packet, "OOB_LLU" interrupt line
+ *   3) LLU and PLU Events, "OOB_PLU" interrupt line
+ */
+#define MLXBF_GIGE_ERROR_INTR_IDX       0
+#define MLXBF_GIGE_RECEIVE_PKT_INTR_IDX 1
+#define MLXBF_GIGE_LLU_PLU_INTR_IDX     2
+#define MLXBF_GIGE_PHY_INT_N            3
+
+#define MLXBF_GIGE_MDIO_DEFAULT_PHY_ADDR 0x3
+
+#define MLXBF_GIGE_DEFAULT_PHY_INT_GPIO 12
+
+struct mlxbf_gige_stats {
+       u64 hw_access_errors;
+       u64 tx_invalid_checksums;
+       u64 tx_small_frames;
+       u64 tx_index_errors;
+       u64 sw_config_errors;
+       u64 sw_access_errors;
+       u64 rx_truncate_errors;
+       u64 rx_mac_errors;
+       u64 rx_din_dropped_pkts;
+       u64 tx_fifo_full;
+       u64 rx_filter_passed_pkts;
+       u64 rx_filter_discard_pkts;
+};
+
+struct mlxbf_gige {
+       void __iomem *base;
+       void __iomem *llu_base;
+       void __iomem *plu_base;
+       struct device *dev;
+       struct net_device *netdev;
+       struct platform_device *pdev;
+       void __iomem *mdio_io;
+       struct mii_bus *mdiobus;
+       void __iomem *gpio_io;
+       struct irq_domain *irqdomain;
+       u32 phy_int_gpio_mask;
+       spinlock_t lock;      /* for packet processing indices */
+       spinlock_t gpio_lock; /* for GPIO bus access */
+       u16 rx_q_entries;
+       u16 tx_q_entries;
+       u64 *tx_wqe_base;
+       dma_addr_t tx_wqe_base_dma;
+       u64 *tx_wqe_next;
+       u64 *tx_cc;
+       dma_addr_t tx_cc_dma;
+       dma_addr_t *rx_wqe_base;
+       dma_addr_t rx_wqe_base_dma;
+       u64 *rx_cqe_base;
+       dma_addr_t rx_cqe_base_dma;
+       u16 tx_pi;
+       u16 prev_tx_ci;
+       u64 error_intr_count;
+       u64 rx_intr_count;
+       u64 llu_plu_intr_count;
+       struct sk_buff *rx_skb[MLXBF_GIGE_MAX_RXQ_SZ];
+       struct sk_buff *tx_skb[MLXBF_GIGE_MAX_TXQ_SZ];
+       int error_irq;
+       int rx_irq;
+       int llu_plu_irq;
+       int phy_irq;
+       int hw_phy_irq;
+       bool promisc_enabled;
+       u8 valid_polarity;
+       struct napi_struct napi;
+       struct mlxbf_gige_stats stats;
+};
+
+/* Rx Work Queue Element definitions */
+#define MLXBF_GIGE_RX_WQE_SZ                   8
+
+/* Rx Completion Queue Element definitions */
+#define MLXBF_GIGE_RX_CQE_SZ                   8
+#define MLXBF_GIGE_RX_CQE_PKT_LEN_MASK         GENMASK(10, 0)
+#define MLXBF_GIGE_RX_CQE_VALID_MASK           GENMASK(11, 11)
+#define MLXBF_GIGE_RX_CQE_PKT_STATUS_MASK      GENMASK(15, 12)
+#define MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR   GENMASK(12, 12)
+#define MLXBF_GIGE_RX_CQE_PKT_STATUS_TRUNCATED GENMASK(13, 13)
+#define MLXBF_GIGE_RX_CQE_CHKSUM_MASK          GENMASK(31, 16)
+
+/* Tx Work Queue Element definitions */
+#define MLXBF_GIGE_TX_WQE_SZ_QWORDS            2
+#define MLXBF_GIGE_TX_WQE_SZ                   16
+#define MLXBF_GIGE_TX_WQE_PKT_LEN_MASK         GENMASK(10, 0)
+#define MLXBF_GIGE_TX_WQE_UPDATE_MASK          GENMASK(31, 31)
+#define MLXBF_GIGE_TX_WQE_CHKSUM_LEN_MASK      GENMASK(42, 32)
+#define MLXBF_GIGE_TX_WQE_CHKSUM_START_MASK    GENMASK(55, 48)
+#define MLXBF_GIGE_TX_WQE_CHKSUM_OFFSET_MASK   GENMASK(63, 56)
+
+/* Macro to return packet length of specified TX WQE */
+#define MLXBF_GIGE_TX_WQE_PKT_LEN(tx_wqe_addr) \
+       (*((tx_wqe_addr) + 1) & MLXBF_GIGE_TX_WQE_PKT_LEN_MASK)
+
+/* Tx Completion Count */
+#define MLXBF_GIGE_TX_CC_SZ                    8
+
+/* List of resources in ACPI table */
+enum mlxbf_gige_res {
+       MLXBF_GIGE_RES_MAC,
+       MLXBF_GIGE_RES_MDIO9,
+       MLXBF_GIGE_RES_GPIO0,
+       MLXBF_GIGE_RES_LLU,
+       MLXBF_GIGE_RES_PLU
+};
+
+/* Version of register data returned by mlxbf_gige_get_regs() */
+#define MLXBF_GIGE_REGS_VERSION 1
+
+int mlxbf_gige_mdio_probe(struct platform_device *pdev,
+                         struct mlxbf_gige *priv);
+void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv);
+irqreturn_t mlxbf_gige_mdio_handle_phy_interrupt(int irq, void *dev_id);
+void mlxbf_gige_mdio_enable_phy_int(struct mlxbf_gige *priv);
+
+void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
+                                 unsigned int index, u64 dmac);
+void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
+                                 unsigned int index, u64 *dmac);
+void mlxbf_gige_enable_promisc(struct mlxbf_gige *priv);
+void mlxbf_gige_disable_promisc(struct mlxbf_gige *priv);
+int mlxbf_gige_rx_init(struct mlxbf_gige *priv);
+void mlxbf_gige_rx_deinit(struct mlxbf_gige *priv);
+int mlxbf_gige_tx_init(struct mlxbf_gige *priv);
+void mlxbf_gige_tx_deinit(struct mlxbf_gige *priv);
+bool mlxbf_gige_handle_tx_complete(struct mlxbf_gige *priv);
+netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
+                                 struct net_device *netdev);
+struct sk_buff *mlxbf_gige_alloc_skb(struct mlxbf_gige *priv,
+                                    unsigned int map_len,
+                                    dma_addr_t *buf_dma,
+                                    enum dma_data_direction dir);
+int mlxbf_gige_request_irqs(struct mlxbf_gige *priv);
+void mlxbf_gige_free_irqs(struct mlxbf_gige *priv);
+int mlxbf_gige_poll(struct napi_struct *napi, int budget);
+extern const struct ethtool_ops mlxbf_gige_ethtool_ops;
+void mlxbf_gige_update_tx_wqe_next(struct mlxbf_gige *priv);
+
+int mlxbf_gige_gpio_init(struct platform_device *pdev, struct mlxbf_gige *priv);
+void mlxbf_gige_gpio_free(struct mlxbf_gige *priv);
+
+#endif /* !defined(__MLXBF_GIGE_H__) */
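The GENMASK-based CQE/WQE field definitions above are designed to pair with the kernel bitfield helpers. A small sketch, assuming the RX CQE is read as a 64-bit word (the handler function itself is hypothetical):

	#include <linux/bitfield.h>

	/* Illustrative only: extract RX CQE fields with FIELD_GET() */
	static bool example_rx_cqe_ok(u64 cqe, u32 *pkt_len)
	{
		if (!FIELD_GET(MLXBF_GIGE_RX_CQE_VALID_MASK, cqe))
			return false;	/* CQE not yet written by HW */

		*pkt_len = FIELD_GET(MLXBF_GIGE_RX_CQE_PKT_LEN_MASK, cqe);
		return !FIELD_GET(MLXBF_GIGE_RX_CQE_PKT_STATUS_MASK, cqe);
	}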
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
new file mode 100644 (file)
index 0000000..92b798f
--- /dev/null
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* Ethtool support for Mellanox Gigabit Ethernet driver
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/phy.h>
+
+#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
+
+/* Start of struct ethtool_ops functions */
+static int mlxbf_gige_get_regs_len(struct net_device *netdev)
+{
+       return MLXBF_GIGE_MMIO_REG_SZ;
+}
+
+static void mlxbf_gige_get_regs(struct net_device *netdev,
+                               struct ethtool_regs *regs, void *p)
+{
+       struct mlxbf_gige *priv = netdev_priv(netdev);
+
+       regs->version = MLXBF_GIGE_REGS_VERSION;
+
+       /* Read entire MMIO register space and store results
+        * into the provided buffer. Each 64-bit word is converted
+        * to big-endian to make the output more readable.
+        *
+        * NOTE: by design, a read to an offset without an existing
+        *       register will be acknowledged and return zero.
+        */
+       memcpy_fromio(p, priv->base, MLXBF_GIGE_MMIO_REG_SZ);
+}
+
+static void mlxbf_gige_get_ringparam(struct net_device *netdev,
+                                    struct ethtool_ringparam *ering)
+{
+       struct mlxbf_gige *priv = netdev_priv(netdev);
+
+       ering->rx_max_pending = MLXBF_GIGE_MAX_RXQ_SZ;
+       ering->tx_max_pending = MLXBF_GIGE_MAX_TXQ_SZ;
+       ering->rx_pending = priv->rx_q_entries;
+       ering->tx_pending = priv->tx_q_entries;
+}
+
+static const struct {
+       const char string[ETH_GSTRING_LEN];
+} mlxbf_gige_ethtool_stats_keys[] = {
+       { "hw_access_errors" },
+       { "tx_invalid_checksums" },
+       { "tx_small_frames" },
+       { "tx_index_errors" },
+       { "sw_config_errors" },
+       { "sw_access_errors" },
+       { "rx_truncate_errors" },
+       { "rx_mac_errors" },
+       { "rx_din_dropped_pkts" },
+       { "tx_fifo_full" },
+       { "rx_filter_passed_pkts" },
+       { "rx_filter_discard_pkts" },
+};
+
+static int mlxbf_gige_get_sset_count(struct net_device *netdev, int stringset)
+{
+       if (stringset != ETH_SS_STATS)
+               return -EOPNOTSUPP;
+       return ARRAY_SIZE(mlxbf_gige_ethtool_stats_keys);
+}
+
+static void mlxbf_gige_get_strings(struct net_device *netdev, u32 stringset,
+                                  u8 *buf)
+{
+       if (stringset != ETH_SS_STATS)
+               return;
+       memcpy(buf, &mlxbf_gige_ethtool_stats_keys,
+              sizeof(mlxbf_gige_ethtool_stats_keys));
+}
+
+static void mlxbf_gige_get_ethtool_stats(struct net_device *netdev,
+                                        struct ethtool_stats *estats,
+                                        u64 *data)
+{
+       struct mlxbf_gige *priv = netdev_priv(netdev);
+
+       /* Fill data array with interface statistics
+        *
+        * NOTE: the data writes must be in
+        *       sync with the strings shown in
+        *       the mlxbf_gige_ethtool_stats_keys[] array
+        *
+        * NOTE2: certain statistics below are zeroed upon
+        *        port disable, so the calculation below
+        *        must include the "cached" value of the stat
+        *        plus the value read directly from hardware.
+        *        Cached statistics are currently:
+        *          rx_din_dropped_pkts
+        *          rx_filter_passed_pkts
+        *          rx_filter_discard_pkts
+        */
+       *data++ = priv->stats.hw_access_errors;
+       *data++ = priv->stats.tx_invalid_checksums;
+       *data++ = priv->stats.tx_small_frames;
+       *data++ = priv->stats.tx_index_errors;
+       *data++ = priv->stats.sw_config_errors;
+       *data++ = priv->stats.sw_access_errors;
+       *data++ = priv->stats.rx_truncate_errors;
+       *data++ = priv->stats.rx_mac_errors;
+       *data++ = (priv->stats.rx_din_dropped_pkts +
+                  readq(priv->base + MLXBF_GIGE_RX_DIN_DROP_COUNTER));
+       *data++ = priv->stats.tx_fifo_full;
+       *data++ = (priv->stats.rx_filter_passed_pkts +
+                  readq(priv->base + MLXBF_GIGE_RX_PASS_COUNTER_ALL));
+       *data++ = (priv->stats.rx_filter_discard_pkts +
+                  readq(priv->base + MLXBF_GIGE_RX_DISC_COUNTER_ALL));
+}
+
+static void mlxbf_gige_get_pauseparam(struct net_device *netdev,
+                                     struct ethtool_pauseparam *pause)
+{
+       pause->autoneg = AUTONEG_DISABLE;
+       pause->rx_pause = 1;
+       pause->tx_pause = 1;
+}
+
+const struct ethtool_ops mlxbf_gige_ethtool_ops = {
+       .get_link               = ethtool_op_get_link,
+       .get_ringparam          = mlxbf_gige_get_ringparam,
+       .get_regs_len           = mlxbf_gige_get_regs_len,
+       .get_regs               = mlxbf_gige_get_regs,
+       .get_strings            = mlxbf_gige_get_strings,
+       .get_sset_count         = mlxbf_gige_get_sset_count,
+       .get_ethtool_stats      = mlxbf_gige_get_ethtool_stats,
+       .nway_reset             = phy_ethtool_nway_reset,
+       .get_pauseparam         = mlxbf_gige_get_pauseparam,
+       .get_link_ksettings     = phy_ethtool_get_link_ksettings,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_gpio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_gpio.c
new file mode 100644 (file)
index 0000000..a8d966d
--- /dev/null
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* Initialize and handle GPIO interrupt triggered by INT_N PHY signal.
+ * This GPIO interrupt triggers the PHY state machine to bring the link
+ * up/down.
+ *
+ * Copyright (C) 2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqreturn.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
+
+#define MLXBF_GIGE_GPIO_CAUSE_FALL_EN          0x48
+#define MLXBF_GIGE_GPIO_CAUSE_OR_CAUSE_EVTEN0  0x80
+#define MLXBF_GIGE_GPIO_CAUSE_OR_EVTEN0                0x94
+#define MLXBF_GIGE_GPIO_CAUSE_OR_CLRCAUSE      0x98
+
+static void mlxbf_gige_gpio_enable(struct mlxbf_gige *priv)
+{
+       unsigned long flags;
+       u32 val;
+
+       spin_lock_irqsave(&priv->gpio_lock, flags);
+       val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_CLRCAUSE);
+       val |= priv->phy_int_gpio_mask;
+       writel(val, priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_CLRCAUSE);
+
+       /* The INT_N interrupt level is active low,
+        * so enable the cause-fall bit to detect when the
+        * GPIO state goes low.
+        */
+       val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_FALL_EN);
+       val |= priv->phy_int_gpio_mask;
+       writel(val, priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_FALL_EN);
+
+       /* Enable PHY interrupt by setting the priority level */
+       val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_EVTEN0);
+       val |= priv->phy_int_gpio_mask;
+       writel(val, priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_EVTEN0);
+       spin_unlock_irqrestore(&priv->gpio_lock, flags);
+}
+
+static void mlxbf_gige_gpio_disable(struct mlxbf_gige *priv)
+{
+       unsigned long flags;
+       u32 val;
+
+       spin_lock_irqsave(&priv->gpio_lock, flags);
+       val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_EVTEN0);
+       val &= ~priv->phy_int_gpio_mask;
+       writel(val, priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_EVTEN0);
+       spin_unlock_irqrestore(&priv->gpio_lock, flags);
+}
+
+static irqreturn_t mlxbf_gige_gpio_handler(int irq, void *ptr)
+{
+       struct mlxbf_gige *priv;
+       u32 val;
+
+       priv = ptr;
+
+       /* Check if this interrupt is from PHY device.
+        * Return if it is not.
+        */
+       val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_CAUSE_EVTEN0);
+       if (!(val & priv->phy_int_gpio_mask))
+               return IRQ_NONE;
+
+       /* Clear the interrupt when done, otherwise no further
+        * interrupts will be triggered.
+        */
+       val = readl(priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_CLRCAUSE);
+       val |= priv->phy_int_gpio_mask;
+       writel(val, priv->gpio_io + MLXBF_GIGE_GPIO_CAUSE_OR_CLRCAUSE);
+
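+       /* Dispatch to the PHY's virtual IRQ, which was mapped onto
+        * this GPIO line in mlxbf_gige_gpio_init()
+        */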
+       generic_handle_irq(priv->phy_irq);
+
+       return IRQ_HANDLED;
+}
+
+static void mlxbf_gige_gpio_mask(struct irq_data *irqd)
+{
+       struct mlxbf_gige *priv = irq_data_get_irq_chip_data(irqd);
+
+       mlxbf_gige_gpio_disable(priv);
+}
+
+static void mlxbf_gige_gpio_unmask(struct irq_data *irqd)
+{
+       struct mlxbf_gige *priv = irq_data_get_irq_chip_data(irqd);
+
+       mlxbf_gige_gpio_enable(priv);
+}
+
+static struct irq_chip mlxbf_gige_gpio_chip = {
+       .name                   = "mlxbf_gige_phy",
+       .irq_mask               = mlxbf_gige_gpio_mask,
+       .irq_unmask             = mlxbf_gige_gpio_unmask,
+};
+
+static int mlxbf_gige_gpio_domain_map(struct irq_domain *d,
+                                     unsigned int irq,
+                                     irq_hw_number_t hwirq)
+{
+       irq_set_chip_data(irq, d->host_data);
+       irq_set_chip_and_handler(irq, &mlxbf_gige_gpio_chip, handle_simple_irq);
+       irq_set_noprobe(irq);
+
+       return 0;
+}
+
+static const struct irq_domain_ops mlxbf_gige_gpio_domain_ops = {
+       .map    = mlxbf_gige_gpio_domain_map,
+       .xlate  = irq_domain_xlate_twocell,
+};
+
+#ifdef CONFIG_ACPI
+static int mlxbf_gige_gpio_resources(struct acpi_resource *ares,
+                                    void *data)
+{
+       struct acpi_resource_gpio *gpio;
+       u32 *phy_int_gpio = data;
+
+       if (ares->type == ACPI_RESOURCE_TYPE_GPIO) {
+               gpio = &ares->data.gpio;
+               *phy_int_gpio = gpio->pin_table[0];
+       }
+
+       return 1;
+}
+#endif
+
+void mlxbf_gige_gpio_free(struct mlxbf_gige *priv)
+{
+       irq_dispose_mapping(priv->phy_irq);
+       irq_domain_remove(priv->irqdomain);
+}
+
+int mlxbf_gige_gpio_init(struct platform_device *pdev,
+                        struct mlxbf_gige *priv)
+{
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       u32 phy_int_gpio = 0;
+       int ret;
+
+       LIST_HEAD(resources);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_GPIO0);
+       if (!res)
+               return -ENODEV;
+
+       priv->gpio_io = devm_ioremap(dev, res->start, resource_size(res));
+       if (!priv->gpio_io)
+               return -ENOMEM;
+
+#ifdef CONFIG_ACPI
+       ret = acpi_dev_get_resources(ACPI_COMPANION(dev),
+                                    &resources, mlxbf_gige_gpio_resources,
+                                    &phy_int_gpio);
+       acpi_dev_free_resource_list(&resources);
+       if (ret < 0 || !phy_int_gpio) {
+               dev_err(dev, "Error retrieving the gpio phy pin");
+               return -EINVAL;
+       }
+#endif
+
+       priv->phy_int_gpio_mask = BIT(phy_int_gpio);
+
+       mlxbf_gige_gpio_disable(priv);
+
+       priv->hw_phy_irq = platform_get_irq(pdev, MLXBF_GIGE_PHY_INT_N);
+
+       priv->irqdomain = irq_domain_add_simple(NULL, 1, 0,
+                                               &mlxbf_gige_gpio_domain_ops,
+                                               priv);
+       if (!priv->irqdomain) {
+               dev_err(dev, "Failed to add IRQ domain\n");
+               return -ENOMEM;
+       }
+
+       priv->phy_irq = irq_create_mapping(priv->irqdomain, 0);
+       if (!priv->phy_irq) {
+               irq_domain_remove(priv->irqdomain);
+               priv->irqdomain = NULL;
+               dev_err(dev, "Error mapping PHY IRQ\n");
+               return -EINVAL;
+       }
+
+       ret = devm_request_irq(dev, priv->hw_phy_irq, mlxbf_gige_gpio_handler,
+                              IRQF_ONESHOT | IRQF_SHARED, "mlxbf_gige_phy", priv);
+       if (ret) {
+               dev_err(dev, "Failed to request PHY IRQ");
+               mlxbf_gige_gpio_free(priv);
+               return ret;
+       }
+
+       return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c
new file mode 100644 (file)
index 0000000..c38795b
--- /dev/null
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* Interrupt related logic for Mellanox Gigabit Ethernet driver
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/interrupt.h>
+
+#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
+
+static irqreturn_t mlxbf_gige_error_intr(int irq, void *dev_id)
+{
+       struct mlxbf_gige *priv;
+       u64 int_status;
+
+       priv = dev_id;
+
+       priv->error_intr_count++;
+
+       int_status = readq(priv->base + MLXBF_GIGE_INT_STATUS);
+
+       if (int_status & MLXBF_GIGE_INT_STATUS_HW_ACCESS_ERROR)
+               priv->stats.hw_access_errors++;
+
+       if (int_status & MLXBF_GIGE_INT_STATUS_TX_CHECKSUM_INPUTS) {
+               priv->stats.tx_invalid_checksums++;
+               /* This error condition is latched into MLXBF_GIGE_INT_STATUS
+                * when the GigE silicon operates on the offending
+                * TX WQE. The write to MLXBF_GIGE_INT_STATUS at the bottom
+                * of this routine clears this error condition.
+                */
+       }
+
+       if (int_status & MLXBF_GIGE_INT_STATUS_TX_SMALL_FRAME_SIZE) {
+               priv->stats.tx_small_frames++;
+               /* This condition happens when the networking stack invokes
+                * this driver's "start_xmit()" method with a packet whose
+                * size < 60 bytes.  The GigE silicon will automatically pad
+                * this small frame up to a minimum-sized frame before it is
+                * sent. The "tx_small_frame" condition is latched into the
+                * MLXBF_GIGE_INT_STATUS register when the GigE silicon
+                * operates on the offending TX WQE. The write to
+                * MLXBF_GIGE_INT_STATUS at the bottom of this routine
+                * clears this condition.
+                */
+       }
+
+       if (int_status & MLXBF_GIGE_INT_STATUS_TX_PI_CI_EXCEED_WQ_SIZE)
+               priv->stats.tx_index_errors++;
+
+       if (int_status & MLXBF_GIGE_INT_STATUS_SW_CONFIG_ERROR)
+               priv->stats.sw_config_errors++;
+
+       if (int_status & MLXBF_GIGE_INT_STATUS_SW_ACCESS_ERROR)
+               priv->stats.sw_access_errors++;
+
+       /* Clear all error interrupts by writing '1' back to
+        * all the asserted bits in INT_STATUS.  Do not write
+        * '1' back to 'receive packet' bit, since that is
+        * managed separately.
+        */
+
+       int_status &= ~MLXBF_GIGE_INT_STATUS_RX_RECEIVE_PACKET;
+
+       writeq(int_status, priv->base + MLXBF_GIGE_INT_STATUS);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t mlxbf_gige_rx_intr(int irq, void *dev_id)
+{
+       struct mlxbf_gige *priv;
+
+       priv = dev_id;
+
+       priv->rx_intr_count++;
+
+       /* NOTE: GigE silicon automatically disables "packet rx" interrupt by
+        *       setting MLXBF_GIGE_INT_MASK bit0 upon triggering the interrupt
+        *       to the ARM cores.  Software needs to re-enable "packet rx"
+        *       interrupts by clearing MLXBF_GIGE_INT_MASK bit0.
+        */
+
+       napi_schedule(&priv->napi);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t mlxbf_gige_llu_plu_intr(int irq, void *dev_id)
+{
+       struct mlxbf_gige *priv;
+
+       priv = dev_id;
+       priv->llu_plu_intr_count++;
+
+       return IRQ_HANDLED;
+}
+
+int mlxbf_gige_request_irqs(struct mlxbf_gige *priv)
+{
+       int err;
+
+       err = request_irq(priv->error_irq, mlxbf_gige_error_intr, 0,
+                         "mlxbf_gige_error", priv);
+       if (err) {
+               dev_err(priv->dev, "Request error_irq failure\n");
+               return err;
+       }
+
+       err = request_irq(priv->rx_irq, mlxbf_gige_rx_intr, 0,
+                         "mlxbf_gige_rx", priv);
+       if (err) {
+               dev_err(priv->dev, "Request rx_irq failure\n");
+               goto free_error_irq;
+       }
+
+       err = request_irq(priv->llu_plu_irq, mlxbf_gige_llu_plu_intr, 0,
+                         "mlxbf_gige_llu_plu", priv);
+       if (err) {
+               dev_err(priv->dev, "Request llu_plu_irq failure\n");
+               goto free_rx_irq;
+       }
+
+       return 0;
+
+free_rx_irq:
+       free_irq(priv->rx_irq, priv);
+
+free_error_irq:
+       free_irq(priv->error_irq, priv);
+
+       return err;
+}
+
+void mlxbf_gige_free_irqs(struct mlxbf_gige *priv)
+{
+       free_irq(priv->error_irq, priv);
+       free_irq(priv->rx_irq, priv);
+       free_irq(priv->llu_plu_irq, priv);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
new file mode 100644 (file)
index 0000000..a0a059e
--- /dev/null
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* Gigabit Ethernet driver for Mellanox BlueField SoC
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+
+#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
+
+#define DRV_NAME    "mlxbf_gige"
+
+/* Allocate SKB whose payload pointer aligns with the BlueField
+ * hardware DMA limitation, i.e. a DMA operation can't cross
+ * a 4KB boundary.  A maximum packet size of 2KB is assumed in the
+ * alignment formula.  The alignment logic overallocates an SKB,
+ * and then adjusts the headroom so that the SKB data pointer is
+ * naturally aligned to a 2KB boundary.
+ */
+struct sk_buff *mlxbf_gige_alloc_skb(struct mlxbf_gige *priv,
+                                    unsigned int map_len,
+                                    dma_addr_t *buf_dma,
+                                    enum dma_data_direction dir)
+{
+       struct sk_buff *skb;
+       u64 addr, offset;
+
+       /* Overallocate the SKB so that any headroom adjustment (to
+        * provide 2KB natural alignment) does not exceed payload area
+        */
+       skb = netdev_alloc_skb(priv->netdev, MLXBF_GIGE_DEFAULT_BUF_SZ * 2);
+       if (!skb)
+               return NULL;
+
+       /* Adjust the headroom so that skb->data is naturally aligned to
+        * a 2KB boundary, which is the maximum packet size supported.
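+        * For example, if skb->data lands at an address ending in
+        * 0xc100, the formula below computes offset = 0x700 and
+        * skb_reserve() moves the data pointer to the next 2KB
+        * boundary, ending in 0xc800.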
+        */
+       addr = (long)skb->data;
+       offset = (addr + MLXBF_GIGE_DEFAULT_BUF_SZ - 1) &
+               ~(MLXBF_GIGE_DEFAULT_BUF_SZ - 1);
+       offset -= addr;
+       if (offset)
+               skb_reserve(skb, offset);
+
+       /* Return streaming DMA mapping to caller */
+       *buf_dma = dma_map_single(priv->dev, skb->data, map_len, dir);
+       if (dma_mapping_error(priv->dev, *buf_dma)) {
+               dev_kfree_skb(skb);
+               *buf_dma = (dma_addr_t)0;
+               return NULL;
+       }
+
+       return skb;
+}
+
+static void mlxbf_gige_initial_mac(struct mlxbf_gige *priv)
+{
+       u8 mac[ETH_ALEN];
+       u64 local_mac;
+
+       memset(mac, 0, ETH_ALEN);
+       mlxbf_gige_get_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX,
+                                    &local_mac);
+       u64_to_ether_addr(local_mac, mac);
+
+       if (is_valid_ether_addr(mac)) {
+               ether_addr_copy(priv->netdev->dev_addr, mac);
+       } else {
+               /* Provide a random MAC if for some reason the device has
+                * not been configured with a valid MAC address already.
+                */
+               eth_hw_addr_random(priv->netdev);
+       }
+
+       local_mac = ether_addr_to_u64(priv->netdev->dev_addr);
+       mlxbf_gige_set_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX,
+                                    local_mac);
+}
+
+static void mlxbf_gige_cache_stats(struct mlxbf_gige *priv)
+{
+       struct mlxbf_gige_stats *p;
+
+       /* Cache stats that will be cleared by clean port operation */
+       p = &priv->stats;
+       p->rx_din_dropped_pkts += readq(priv->base +
+                                       MLXBF_GIGE_RX_DIN_DROP_COUNTER);
+       p->rx_filter_passed_pkts += readq(priv->base +
+                                         MLXBF_GIGE_RX_PASS_COUNTER_ALL);
+       p->rx_filter_discard_pkts += readq(priv->base +
+                                          MLXBF_GIGE_RX_DISC_COUNTER_ALL);
+}
+
+static int mlxbf_gige_clean_port(struct mlxbf_gige *priv)
+{
+       u64 control;
+       u64 temp;
+       int err;
+
+       /* Set the CLEAN_PORT_EN bit to trigger SW reset */
+       control = readq(priv->base + MLXBF_GIGE_CONTROL);
+       control |= MLXBF_GIGE_CONTROL_CLEAN_PORT_EN;
+       writeq(control, priv->base + MLXBF_GIGE_CONTROL);
+
+       /* Ensure completion of "clean port" write before polling status */
+       mb();
+
+       err = readq_poll_timeout_atomic(priv->base + MLXBF_GIGE_STATUS, temp,
+                                       (temp & MLXBF_GIGE_STATUS_READY),
+                                       100, 100000);
+
+       /* Clear the CLEAN_PORT_EN bit once the poll completes */
+       control = readq(priv->base + MLXBF_GIGE_CONTROL);
+       control &= ~MLXBF_GIGE_CONTROL_CLEAN_PORT_EN;
+       writeq(control, priv->base + MLXBF_GIGE_CONTROL);
+
+       return err;
+}
+
+static int mlxbf_gige_open(struct net_device *netdev)
+{
+       struct mlxbf_gige *priv = netdev_priv(netdev);
+       struct phy_device *phydev = netdev->phydev;
+       u64 int_en;
+       int err;
+
+       err = mlxbf_gige_request_irqs(priv);
+       if (err)
+               return err;
+       mlxbf_gige_cache_stats(priv);
+       err = mlxbf_gige_clean_port(priv);
+       if (err)
+               goto free_irqs;
+       err = mlxbf_gige_rx_init(priv);
+       if (err)
+               goto free_irqs;
+       err = mlxbf_gige_tx_init(priv);
+       if (err)
+               goto rx_deinit;
+
+       phy_start(phydev);
+
+       netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll, NAPI_POLL_WEIGHT);
+       napi_enable(&priv->napi);
+       netif_start_queue(netdev);
+
+       /* Set bits in INT_EN that we care about */
+       int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR |
+                MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS |
+                MLXBF_GIGE_INT_EN_TX_SMALL_FRAME_SIZE |
+                MLXBF_GIGE_INT_EN_TX_PI_CI_EXCEED_WQ_SIZE |
+                MLXBF_GIGE_INT_EN_SW_CONFIG_ERROR |
+                MLXBF_GIGE_INT_EN_SW_ACCESS_ERROR |
+                MLXBF_GIGE_INT_EN_RX_RECEIVE_PACKET;
+
+       /* Ensure completion of all initialization before enabling interrupts */
+       mb();
+
+       writeq(int_en, priv->base + MLXBF_GIGE_INT_EN);
+
+       return 0;
+
+rx_deinit:
+       mlxbf_gige_rx_deinit(priv);
+
+free_irqs:
+       mlxbf_gige_free_irqs(priv);
+       return err;
+}
+
+static int mlxbf_gige_stop(struct net_device *netdev)
+{
+       struct mlxbf_gige *priv = netdev_priv(netdev);
+
+       writeq(0, priv->base + MLXBF_GIGE_INT_EN);
+       netif_stop_queue(netdev);
+       napi_disable(&priv->napi);
+       netif_napi_del(&priv->napi);
+       mlxbf_gige_free_irqs(priv);
+
+       phy_stop(netdev->phydev);
+
+       mlxbf_gige_rx_deinit(priv);
+       mlxbf_gige_tx_deinit(priv);
+       mlxbf_gige_cache_stats(priv);
+       mlxbf_gige_clean_port(priv);
+
+       return 0;
+}
+
+static int mlxbf_gige_do_ioctl(struct net_device *netdev,
+                              struct ifreq *ifr, int cmd)
+{
+       if (!netif_running(netdev))
+               return -EINVAL;
+
+       return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+}
+
+static void mlxbf_gige_set_rx_mode(struct net_device *netdev)
+{
+       struct mlxbf_gige *priv = netdev_priv(netdev);
+       bool new_promisc_enabled;
+
+       new_promisc_enabled = netdev->flags & IFF_PROMISC;
+
+       /* Only write to the hardware registers if the new setting
+        * of promiscuous mode is different from the current one.
+        */
+       if (new_promisc_enabled != priv->promisc_enabled) {
+               priv->promisc_enabled = new_promisc_enabled;
+
+               if (new_promisc_enabled)
+                       mlxbf_gige_enable_promisc(priv);
+               else
+                       mlxbf_gige_disable_promisc(priv);
+       }
+}
+
+static void mlxbf_gige_get_stats64(struct net_device *netdev,
+                                  struct rtnl_link_stats64 *stats)
+{
+       struct mlxbf_gige *priv = netdev_priv(netdev);
+
+       netdev_stats_to_stats64(stats, &netdev->stats);
+
+       stats->rx_length_errors = priv->stats.rx_truncate_errors;
+       stats->rx_fifo_errors = priv->stats.rx_din_dropped_pkts +
+                               readq(priv->base + MLXBF_GIGE_RX_DIN_DROP_COUNTER);
+       stats->rx_crc_errors = priv->stats.rx_mac_errors;
+       stats->rx_errors = stats->rx_length_errors +
+                          stats->rx_fifo_errors +
+                          stats->rx_crc_errors;
+
+       stats->tx_fifo_errors = priv->stats.tx_fifo_full;
+       stats->tx_errors = stats->tx_fifo_errors;
+}
+
+static const struct net_device_ops mlxbf_gige_netdev_ops = {
+       .ndo_open               = mlxbf_gige_open,
+       .ndo_stop               = mlxbf_gige_stop,
+       .ndo_start_xmit         = mlxbf_gige_start_xmit,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_do_ioctl           = mlxbf_gige_do_ioctl,
+       .ndo_set_rx_mode        = mlxbf_gige_set_rx_mode,
+       .ndo_get_stats64        = mlxbf_gige_get_stats64,
+};
+
+static void mlxbf_gige_adjust_link(struct net_device *netdev)
+{
+       struct phy_device *phydev = netdev->phydev;
+
+       phy_print_status(phydev);
+}
+
+static int mlxbf_gige_probe(struct platform_device *pdev)
+{
+       struct phy_device *phydev;
+       struct net_device *netdev;
+       struct resource *mac_res;
+       struct resource *llu_res;
+       struct resource *plu_res;
+       struct mlxbf_gige *priv;
+       void __iomem *llu_base;
+       void __iomem *plu_base;
+       void __iomem *base;
+       u64 control;
+       int addr;
+       int err;
+
+       mac_res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_MAC);
+       if (!mac_res)
+               return -ENXIO;
+
+       base = devm_ioremap_resource(&pdev->dev, mac_res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       llu_res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_LLU);
+       if (!llu_res)
+               return -ENXIO;
+
+       llu_base = devm_ioremap_resource(&pdev->dev, llu_res);
+       if (IS_ERR(llu_base))
+               return PTR_ERR(llu_base);
+
+       plu_res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_PLU);
+       if (!plu_res)
+               return -ENXIO;
+
+       plu_base = devm_ioremap_resource(&pdev->dev, plu_res);
+       if (IS_ERR(plu_base))
+               return PTR_ERR(plu_base);
+
+       /* Perform general init of GigE block */
+       control = readq(base + MLXBF_GIGE_CONTROL);
+       control |= MLXBF_GIGE_CONTROL_PORT_EN;
+       writeq(control, base + MLXBF_GIGE_CONTROL);
+
+       netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv));
+       if (!netdev)
+               return -ENOMEM;
+
+       SET_NETDEV_DEV(netdev, &pdev->dev);
+       netdev->netdev_ops = &mlxbf_gige_netdev_ops;
+       netdev->ethtool_ops = &mlxbf_gige_ethtool_ops;
+       priv = netdev_priv(netdev);
+       priv->netdev = netdev;
+
+       platform_set_drvdata(pdev, priv);
+       priv->dev = &pdev->dev;
+       priv->pdev = pdev;
+
+       spin_lock_init(&priv->lock);
+       spin_lock_init(&priv->gpio_lock);
+
+       /* Attach MDIO device */
+       err = mlxbf_gige_mdio_probe(pdev, priv);
+       if (err)
+               return err;
+
+       err = mlxbf_gige_gpio_init(pdev, priv);
+       if (err) {
+               dev_err(&pdev->dev, "PHY IRQ initialization failed\n");
+               mlxbf_gige_mdio_remove(priv);
+               return -ENODEV;
+       }
+
+       priv->base = base;
+       priv->llu_base = llu_base;
+       priv->plu_base = plu_base;
+
+       priv->rx_q_entries = MLXBF_GIGE_DEFAULT_RXQ_SZ;
+       priv->tx_q_entries = MLXBF_GIGE_DEFAULT_TXQ_SZ;
+
+       /* Write initial MAC address to hardware */
+       mlxbf_gige_initial_mac(priv);
+
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (err) {
+               dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+               goto out;
+       }
+
+       priv->error_irq = platform_get_irq(pdev, MLXBF_GIGE_ERROR_INTR_IDX);
+       priv->rx_irq = platform_get_irq(pdev, MLXBF_GIGE_RECEIVE_PKT_INTR_IDX);
+       priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX);
+
+       phydev = phy_find_first(priv->mdiobus);
+       if (!phydev) {
+               err = -ENODEV;
+               goto out;
+       }
+
+       addr = phydev->mdio.addr;
+       priv->mdiobus->irq[addr] = priv->phy_irq;
+       phydev->irq = priv->phy_irq;
+
+       err = phy_connect_direct(netdev, phydev,
+                                mlxbf_gige_adjust_link,
+                                PHY_INTERFACE_MODE_GMII);
+       if (err) {
+               dev_err(&pdev->dev, "Could not attach to PHY\n");
+               goto out;
+       }
+
+       /* MAC only supports 1000T full duplex mode */
+       phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+       phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+       phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+       phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+       phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+
+       /* Only symmetric pause with flow control enabled is supported, so
+        * there is no need to negotiate pause.
+        */
+       linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
+       linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising);
+
+       /* Display information about attached PHY device */
+       phy_attached_info(phydev);
+
+       err = register_netdev(netdev);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to register netdev\n");
+               phy_disconnect(phydev);
+               goto out;
+       }
+
+       return 0;
+
+out:
+       mlxbf_gige_gpio_free(priv);
+       mlxbf_gige_mdio_remove(priv);
+       return err;
+}
+
+static int mlxbf_gige_remove(struct platform_device *pdev)
+{
+       struct mlxbf_gige *priv = platform_get_drvdata(pdev);
+
+       unregister_netdev(priv->netdev);
+       phy_disconnect(priv->netdev->phydev);
+       mlxbf_gige_gpio_free(priv);
+       mlxbf_gige_mdio_remove(priv);
+       platform_set_drvdata(pdev, NULL);
+
+       return 0;
+}
+
+static void mlxbf_gige_shutdown(struct platform_device *pdev)
+{
+       struct mlxbf_gige *priv = platform_get_drvdata(pdev);
+
+       writeq(0, priv->base + MLXBF_GIGE_INT_EN);
+       mlxbf_gige_clean_port(priv);
+}
+
+static const struct acpi_device_id __maybe_unused mlxbf_gige_acpi_match[] = {
+       { "MLNXBF17", 0 },
+       {},
+};
+MODULE_DEVICE_TABLE(acpi, mlxbf_gige_acpi_match);
+
+static struct platform_driver mlxbf_gige_driver = {
+       .probe = mlxbf_gige_probe,
+       .remove = mlxbf_gige_remove,
+       .shutdown = mlxbf_gige_shutdown,
+       .driver = {
+               .name = DRV_NAME,
+               .acpi_match_table = ACPI_PTR(mlxbf_gige_acpi_match),
+       },
+};
+
+module_platform_driver(mlxbf_gige_driver);
+
+MODULE_DESCRIPTION("Mellanox BlueField SoC Gigabit Ethernet Driver");
+MODULE_AUTHOR("David Thompson <davthompson@nvidia.com>");
+MODULE_AUTHOR("Asmaa Mnebhi <asmaa@nvidia.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
new file mode 100644 (file)
index 0000000..e32dd34
--- /dev/null
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* MDIO support for Mellanox Gigabit Ethernet driver
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/irqreturn.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#include "mlxbf_gige.h"
+
+#define MLXBF_GIGE_MDIO_GW_OFFSET      0x0
+#define MLXBF_GIGE_MDIO_CFG_OFFSET     0x4
+
+/* Support clause 22 */
+#define MLXBF_GIGE_MDIO_CL22_ST1       0x1
+#define MLXBF_GIGE_MDIO_CL22_WRITE     0x1
+#define MLXBF_GIGE_MDIO_CL22_READ      0x2
+
+/* Busy bit is set by software and cleared by hardware */
+#define MLXBF_GIGE_MDIO_SET_BUSY       0x1
+
+/* MDIO GW register bits */
+#define MLXBF_GIGE_MDIO_GW_AD_MASK     GENMASK(15, 0)
+#define MLXBF_GIGE_MDIO_GW_DEVAD_MASK  GENMASK(20, 16)
+#define MLXBF_GIGE_MDIO_GW_PARTAD_MASK GENMASK(25, 21)
+#define MLXBF_GIGE_MDIO_GW_OPCODE_MASK GENMASK(27, 26)
+#define MLXBF_GIGE_MDIO_GW_ST1_MASK    GENMASK(28, 28)
+#define MLXBF_GIGE_MDIO_GW_BUSY_MASK   GENMASK(30, 30)
+
+/* MDIO config register bits */
+#define MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK             GENMASK(1, 0)
+#define MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK               GENMASK(2, 2)
+#define MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK       GENMASK(4, 4)
+#define MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK            GENMASK(15, 8)
+#define MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK          GENMASK(23, 16)
+#define MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK         GENMASK(31, 24)
+
+/* Formula for encoding the MDIO period. The encoded value is
+ * passed to the MDIO config register.
+ *
+ * mdc_clk = 2*(val + 1)*i1clk
+ *
+ * 400 ns = 2*(val + 1)*(((1/430)*1000) ns)
+ *
+ * val = (((400 * 430 / 1000) / 2) - 1)
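+ *
+ * which evaluates to val = (172 / 2) - 1 = 85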
+ */
+#define MLXBF_GIGE_I1CLK_MHZ           430
+#define MLXBF_GIGE_MDC_CLK_NS          400
+
+#define MLXBF_GIGE_MDIO_PERIOD (((MLXBF_GIGE_MDC_CLK_NS * MLXBF_GIGE_I1CLK_MHZ / 1000) / 2) - 1)
+
+#define MLXBF_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
+                                FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
+                                FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
+                                FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK, \
+                                           MLXBF_GIGE_MDIO_PERIOD) | \
+                                FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
+                                FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
+
+static u32 mlxbf_gige_mdio_create_cmd(u16 data, int phy_add,
+                                     int phy_reg, u32 opcode)
+{
+       u32 gw_reg = 0;
+
+       gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_AD_MASK, data);
+       gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_DEVAD_MASK, phy_reg);
+       gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_PARTAD_MASK, phy_add);
+       gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_OPCODE_MASK, opcode);
+       gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_ST1_MASK,
+                            MLXBF_GIGE_MDIO_CL22_ST1);
+       gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_BUSY_MASK,
+                            MLXBF_GIGE_MDIO_SET_BUSY);
+
+       return gw_reg;
+}
+
+static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg)
+{
+       struct mlxbf_gige *priv = bus->priv;
+       u32 cmd;
+       int ret;
+       u32 val;
+
+       if (phy_reg & MII_ADDR_C45)
+               return -EOPNOTSUPP;
+
+       /* Send mdio read request */
+       cmd = mlxbf_gige_mdio_create_cmd(0, phy_add, phy_reg, MLXBF_GIGE_MDIO_CL22_READ);
+
+       writel(cmd, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+
+       ret = readl_poll_timeout_atomic(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET,
+                                       val, !(val & MLXBF_GIGE_MDIO_GW_BUSY_MASK), 100, 1000000);
+
+       if (ret) {
+               writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+               return ret;
+       }
+
+       ret = readl(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+       /* Only return the AD (data) bits of the GW register */
+       ret &= MLXBF_GIGE_MDIO_GW_AD_MASK;
+
+       return ret;
+}
+
+static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
+                                int phy_reg, u16 val)
+{
+       struct mlxbf_gige *priv = bus->priv;
+       u32 cmd;
+       int ret;
+       u32 temp;
+
+       if (phy_reg & MII_ADDR_C45)
+               return -EOPNOTSUPP;
+
+       /* Send mdio write request */
+       cmd = mlxbf_gige_mdio_create_cmd(val, phy_add, phy_reg,
+                                        MLXBF_GIGE_MDIO_CL22_WRITE);
+       writel(cmd, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+
+       /* If the poll timed out, drop the request */
+       ret = readl_poll_timeout_atomic(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET,
+                                       temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK), 100, 1000000);
+
+       return ret;
+}
+
+int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
+{
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       int ret;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_MDIO9);
+       if (!res)
+               return -ENODEV;
+
+       priv->mdio_io = devm_ioremap_resource(dev, res);
+       if (IS_ERR(priv->mdio_io))
+               return PTR_ERR(priv->mdio_io);
+
+       /* Configure mdio parameters */
+       writel(MLXBF_GIGE_MDIO_CFG_VAL,
+              priv->mdio_io + MLXBF_GIGE_MDIO_CFG_OFFSET);
+
+       priv->mdiobus = devm_mdiobus_alloc(dev);
+       if (!priv->mdiobus) {
+               dev_err(dev, "Failed to alloc MDIO bus\n");
+               return -ENOMEM;
+       }
+
+       priv->mdiobus->name = "mlxbf-mdio";
+       priv->mdiobus->read = mlxbf_gige_mdio_read;
+       priv->mdiobus->write = mlxbf_gige_mdio_write;
+       priv->mdiobus->parent = dev;
+       priv->mdiobus->priv = priv;
+       snprintf(priv->mdiobus->id, MII_BUS_ID_SIZE, "%s",
+                dev_name(dev));
+
+       ret = mdiobus_register(priv->mdiobus);
+       if (ret)
+               dev_err(dev, "Failed to register MDIO bus\n");
+
+       return ret;
+}
+
+void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv)
+{
+       mdiobus_unregister(priv->mdiobus);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
new file mode 100644 (file)
index 0000000..5fb33c9
--- /dev/null
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
+
+/* Header file for Mellanox BlueField GigE register defines
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#ifndef __MLXBF_GIGE_REGS_H__
+#define __MLXBF_GIGE_REGS_H__
+
+#define MLXBF_GIGE_STATUS                             0x0010
+#define MLXBF_GIGE_STATUS_READY                       BIT(0)
+#define MLXBF_GIGE_INT_STATUS                         0x0028
+#define MLXBF_GIGE_INT_STATUS_RX_RECEIVE_PACKET       BIT(0)
+#define MLXBF_GIGE_INT_STATUS_RX_MAC_ERROR            BIT(1)
+#define MLXBF_GIGE_INT_STATUS_RX_TRN_ERROR            BIT(2)
+#define MLXBF_GIGE_INT_STATUS_SW_ACCESS_ERROR         BIT(3)
+#define MLXBF_GIGE_INT_STATUS_SW_CONFIG_ERROR         BIT(4)
+#define MLXBF_GIGE_INT_STATUS_TX_PI_CI_EXCEED_WQ_SIZE BIT(5)
+#define MLXBF_GIGE_INT_STATUS_TX_SMALL_FRAME_SIZE     BIT(6)
+#define MLXBF_GIGE_INT_STATUS_TX_CHECKSUM_INPUTS      BIT(7)
+#define MLXBF_GIGE_INT_STATUS_HW_ACCESS_ERROR         BIT(8)
+#define MLXBF_GIGE_INT_EN                             0x0030
+#define MLXBF_GIGE_INT_EN_RX_RECEIVE_PACKET           BIT(0)
+#define MLXBF_GIGE_INT_EN_RX_MAC_ERROR                BIT(1)
+#define MLXBF_GIGE_INT_EN_RX_TRN_ERROR                BIT(2)
+#define MLXBF_GIGE_INT_EN_SW_ACCESS_ERROR             BIT(3)
+#define MLXBF_GIGE_INT_EN_SW_CONFIG_ERROR             BIT(4)
+#define MLXBF_GIGE_INT_EN_TX_PI_CI_EXCEED_WQ_SIZE     BIT(5)
+#define MLXBF_GIGE_INT_EN_TX_SMALL_FRAME_SIZE         BIT(6)
+#define MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS          BIT(7)
+#define MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR             BIT(8)
+#define MLXBF_GIGE_INT_MASK                           0x0038
+#define MLXBF_GIGE_INT_MASK_RX_RECEIVE_PACKET         BIT(0)
+#define MLXBF_GIGE_CONTROL                            0x0040
+#define MLXBF_GIGE_CONTROL_PORT_EN                    BIT(0)
+#define MLXBF_GIGE_CONTROL_MAC_ID_RANGE_EN            BIT(1)
+#define MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC            BIT(4)
+#define MLXBF_GIGE_CONTROL_CLEAN_PORT_EN              BIT(31)
+#define MLXBF_GIGE_RX_WQ_BASE                         0x0200
+#define MLXBF_GIGE_RX_WQE_SIZE_LOG2                   0x0208
+#define MLXBF_GIGE_RX_WQE_SIZE_LOG2_RESET_VAL         7
+#define MLXBF_GIGE_RX_CQ_BASE                         0x0210
+#define MLXBF_GIGE_TX_WQ_BASE                         0x0218
+#define MLXBF_GIGE_TX_WQ_SIZE_LOG2                    0x0220
+#define MLXBF_GIGE_TX_WQ_SIZE_LOG2_RESET_VAL          7
+#define MLXBF_GIGE_TX_CI_UPDATE_ADDRESS               0x0228
+#define MLXBF_GIGE_RX_WQE_PI                          0x0230
+#define MLXBF_GIGE_TX_PRODUCER_INDEX                  0x0238
+#define MLXBF_GIGE_RX_MAC_FILTER                      0x0240
+#define MLXBF_GIGE_RX_MAC_FILTER_STRIDE               0x0008
+#define MLXBF_GIGE_RX_DIN_DROP_COUNTER                0x0260
+#define MLXBF_GIGE_TX_CONSUMER_INDEX                  0x0310
+#define MLXBF_GIGE_TX_CONTROL                         0x0318
+#define MLXBF_GIGE_TX_CONTROL_GRACEFUL_STOP           BIT(0)
+#define MLXBF_GIGE_TX_STATUS                          0x0388
+#define MLXBF_GIGE_TX_STATUS_DATA_FIFO_FULL           BIT(1)
+#define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_START     0x0520
+#define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_END       0x0528
+#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC           0x0540
+#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC_EN        BIT(0)
+#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS           0x0548
+#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS_EN        BIT(0)
+#define MLXBF_GIGE_RX_PASS_COUNTER_ALL                0x0550
+#define MLXBF_GIGE_RX_DISC_COUNTER_ALL                0x0560
+#define MLXBF_GIGE_RX                                 0x0578
+#define MLXBF_GIGE_RX_STRIP_CRC_EN                    BIT(1)
+#define MLXBF_GIGE_RX_DMA                             0x0580
+#define MLXBF_GIGE_RX_DMA_EN                          BIT(0)
+#define MLXBF_GIGE_RX_CQE_PACKET_CI                   0x05b0
+#define MLXBF_GIGE_MAC_CFG                            0x05e8
+
+/* NOTE: MLXBF_GIGE_MAC_CFG is the last defined register offset,
+ * so use that plus the size of a single register to derive the total size
+ */
+#define MLXBF_GIGE_MMIO_REG_SZ                        (MLXBF_GIGE_MAC_CFG + 8)
+
+#endif /* !defined(__MLXBF_GIGE_REGS_H__) */
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
new file mode 100644 (file)
index 0000000..afa3b92
--- /dev/null
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* Packet receive logic for Mellanox Gigabit Ethernet driver
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
+
+void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
+                                 unsigned int index, u64 dmac)
+{
+       void __iomem *base = priv->base;
+       u64 control;
+
+       /* Write destination MAC to specified MAC RX filter */
+       writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
+              (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
+
+       /* Enable MAC receive filter mask for specified index */
+       control = readq(base + MLXBF_GIGE_CONTROL);
+       control |= (MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC << index);
+       writeq(control, base + MLXBF_GIGE_CONTROL);
+}
+
+void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
+                                 unsigned int index, u64 *dmac)
+{
+       void __iomem *base = priv->base;
+
+       /* Read destination MAC from specified MAC RX filter */
+       *dmac = readq(base + MLXBF_GIGE_RX_MAC_FILTER +
+                     (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
+}
+
+void mlxbf_gige_enable_promisc(struct mlxbf_gige *priv)
+{
+       void __iomem *base = priv->base;
+       u64 control;
+       u64 end_mac;
+
+       /* Enable MAC_ID_RANGE match functionality */
+       control = readq(base + MLXBF_GIGE_CONTROL);
+       control |= MLXBF_GIGE_CONTROL_MAC_ID_RANGE_EN;
+       writeq(control, base + MLXBF_GIGE_CONTROL);
+
+       /* Set start of destination MAC range check to 0 */
+       writeq(0, base + MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_START);
+
+       /* Set end of destination MAC range check to all FFs */
+       end_mac = BCAST_MAC_ADDR;
+       writeq(end_mac, base + MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_END);
+}
+
+void mlxbf_gige_disable_promisc(struct mlxbf_gige *priv)
+{
+       void __iomem *base = priv->base;
+       u64 control;
+
+       /* Disable MAC_ID_RANGE match functionality */
+       control = readq(base + MLXBF_GIGE_CONTROL);
+       control &= ~MLXBF_GIGE_CONTROL_MAC_ID_RANGE_EN;
+       writeq(control, base + MLXBF_GIGE_CONTROL);
+
+       /* NOTE: no need to change DMAC_RANGE_START or END;
+        * those values are ignored since MAC_ID_RANGE_EN=0
+        */
+}
+
+/* Receive Initialization
+ * 1) Configures RX MAC filters via MMIO registers
+ * 2) Allocates RX WQE array using coherent DMA mapping
+ * 3) Initializes each element of RX WQE array with a receive
+ *    buffer pointer (also using coherent DMA mapping)
+ * 4) Allocates RX CQE array using coherent DMA mapping
+ * 5) Completes other misc receive initialization
+ */
+int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
+{
+       size_t wq_size, cq_size;
+       dma_addr_t *rx_wqe_ptr;
+       dma_addr_t rx_buf_dma;
+       u64 data;
+       int i, j;
+
+       /* Configure MAC RX filter #0 to allow RX of broadcast pkts */
+       mlxbf_gige_set_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX,
+                                    BCAST_MAC_ADDR);
+
+       wq_size = MLXBF_GIGE_RX_WQE_SZ * priv->rx_q_entries;
+       priv->rx_wqe_base = dma_alloc_coherent(priv->dev, wq_size,
+                                              &priv->rx_wqe_base_dma,
+                                              GFP_KERNEL);
+       if (!priv->rx_wqe_base)
+               return -ENOMEM;
+
+       /* Initialize 'rx_wqe_ptr' to point to first RX WQE in array
+        * Each RX WQE is simply a receive buffer pointer, so walk
+        * the entire array, allocating a 2KB buffer for each element
+        */
+       rx_wqe_ptr = priv->rx_wqe_base;
+
+       for (i = 0; i < priv->rx_q_entries; i++) {
+               priv->rx_skb[i] = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
+                                                      &rx_buf_dma, DMA_FROM_DEVICE);
+               if (!priv->rx_skb[i])
+                       goto free_wqe_and_skb;
+               *rx_wqe_ptr++ = rx_buf_dma;
+       }
+
+       /* Write RX WQE base address into MMIO reg */
+       writeq(priv->rx_wqe_base_dma, priv->base + MLXBF_GIGE_RX_WQ_BASE);
+
+       cq_size = MLXBF_GIGE_RX_CQE_SZ * priv->rx_q_entries;
+       priv->rx_cqe_base = dma_alloc_coherent(priv->dev, cq_size,
+                                              &priv->rx_cqe_base_dma,
+                                              GFP_KERNEL);
+       if (!priv->rx_cqe_base)
+               goto free_wqe_and_skb;
+
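+       /* Mark every CQE valid up front so that, with the initial
+        * software polarity of zero, no entry matches until hardware
+        * writes a fresh completion into it
+        */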
+       for (i = 0; i < priv->rx_q_entries; i++)
+               priv->rx_cqe_base[i] |= MLXBF_GIGE_RX_CQE_VALID_MASK;
+
+       /* Write RX CQE base address into MMIO reg */
+       writeq(priv->rx_cqe_base_dma, priv->base + MLXBF_GIGE_RX_CQ_BASE);
+
+       /* Write RX_WQE_PI with current number of replenished buffers */
+       writeq(priv->rx_q_entries, priv->base + MLXBF_GIGE_RX_WQE_PI);
+
+       /* Enable removal of CRC during RX */
+       data = readq(priv->base + MLXBF_GIGE_RX);
+       data |= MLXBF_GIGE_RX_STRIP_CRC_EN;
+       writeq(data, priv->base + MLXBF_GIGE_RX);
+
+       /* Enable RX MAC filter pass and discard counters */
+       writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC_EN,
+              priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC);
+       writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS_EN,
+              priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS);
+
+       /* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
+        * indicate readiness to receive interrupts
+        */
+       data = readq(priv->base + MLXBF_GIGE_INT_MASK);
+       data &= ~MLXBF_GIGE_INT_MASK_RX_RECEIVE_PACKET;
+       writeq(data, priv->base + MLXBF_GIGE_INT_MASK);
+
+       /* Enable RX DMA to write new packets to memory */
+       data = readq(priv->base + MLXBF_GIGE_RX_DMA);
+       data |= MLXBF_GIGE_RX_DMA_EN;
+       writeq(data, priv->base + MLXBF_GIGE_RX_DMA);
+
+       writeq(ilog2(priv->rx_q_entries),
+              priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2);
+
+       return 0;
+
+free_wqe_and_skb:
+       rx_wqe_ptr = priv->rx_wqe_base;
+       for (j = 0; j < i; j++) {
+               dma_unmap_single(priv->dev, *rx_wqe_ptr,
+                                MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);
+               dev_kfree_skb(priv->rx_skb[j]);
+               rx_wqe_ptr++;
+       }
+       dma_free_coherent(priv->dev, wq_size,
+                         priv->rx_wqe_base, priv->rx_wqe_base_dma);
+       return -ENOMEM;
+}
+
+/* Receive Deinitialization
+ * This routine will free allocations done by mlxbf_gige_rx_init(),
+ * namely the RX WQE and RX CQE arrays, as well as all RX buffers
+ */
+void mlxbf_gige_rx_deinit(struct mlxbf_gige *priv)
+{
+       dma_addr_t *rx_wqe_ptr;
+       size_t size;
+       u64 data;
+       int i;
+
+       /* Disable RX DMA to prevent packet transfers to memory */
+       data = readq(priv->base + MLXBF_GIGE_RX_DMA);
+       data &= ~MLXBF_GIGE_RX_DMA_EN;
+       writeq(data, priv->base + MLXBF_GIGE_RX_DMA);
+
+       rx_wqe_ptr = priv->rx_wqe_base;
+
+       for (i = 0; i < priv->rx_q_entries; i++) {
+               dma_unmap_single(priv->dev, *rx_wqe_ptr, MLXBF_GIGE_DEFAULT_BUF_SZ,
+                                DMA_FROM_DEVICE);
+               dev_kfree_skb(priv->rx_skb[i]);
+               rx_wqe_ptr++;
+       }
+
+       size = MLXBF_GIGE_RX_WQE_SZ * priv->rx_q_entries;
+       dma_free_coherent(priv->dev, size,
+                         priv->rx_wqe_base, priv->rx_wqe_base_dma);
+
+       size = MLXBF_GIGE_RX_CQE_SZ * priv->rx_q_entries;
+       dma_free_coherent(priv->dev, size,
+                         priv->rx_cqe_base, priv->rx_cqe_base_dma);
+
+       priv->rx_wqe_base = NULL;
+       priv->rx_wqe_base_dma = 0;
+       priv->rx_cqe_base = NULL;
+       priv->rx_cqe_base_dma = 0;
+       writeq(0, priv->base + MLXBF_GIGE_RX_WQ_BASE);
+       writeq(0, priv->base + MLXBF_GIGE_RX_CQ_BASE);
+}
+
+static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
+{
+       struct net_device *netdev = priv->netdev;
+       struct sk_buff *skb = NULL, *rx_skb;
+       u16 rx_pi_rem, rx_ci_rem;
+       dma_addr_t *rx_wqe_addr;
+       dma_addr_t rx_buf_dma;
+       u64 *rx_cqe_addr;
+       u64 datalen;
+       u64 rx_cqe;
+       u16 rx_ci;
+       u16 rx_pi;
+
+       /* Index into RX buffer array is rx_pi with wrap based on rx_q_entries */
+       rx_pi = readq(priv->base + MLXBF_GIGE_RX_WQE_PI);
+       rx_pi_rem = rx_pi % priv->rx_q_entries;
+
+       rx_wqe_addr = priv->rx_wqe_base + rx_pi_rem;
+       rx_cqe_addr = priv->rx_cqe_base + rx_pi_rem;
+       rx_cqe = *rx_cqe_addr;
+
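+       /* The CQE valid bit alternates on each full pass through the
+        * ring; an entry whose valid bit does not match the current
+        * software polarity is stale, not a new completion.
+        */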
+       if ((!!(rx_cqe & MLXBF_GIGE_RX_CQE_VALID_MASK)) != priv->valid_polarity)
+               return false;
+
+       if ((rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MASK) == 0) {
+               /* Packet is OK, increment stats */
+               datalen = rx_cqe & MLXBF_GIGE_RX_CQE_PKT_LEN_MASK;
+               netdev->stats.rx_packets++;
+               netdev->stats.rx_bytes += datalen;
+
+               skb = priv->rx_skb[rx_pi_rem];
+
+               skb_put(skb, datalen);
+
+               skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */
+
+               skb->protocol = eth_type_trans(skb, netdev);
+
+               /* Alloc another RX SKB for this same index */
+               rx_skb = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
+                                             &rx_buf_dma, DMA_FROM_DEVICE);
+               if (!rx_skb)
+                       return false;
+               priv->rx_skb[rx_pi_rem] = rx_skb;
+               dma_unmap_single(priv->dev, *rx_wqe_addr,
+                                MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);
+               *rx_wqe_addr = rx_buf_dma;
+       } else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR) {
+               priv->stats.rx_mac_errors++;
+       } else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_TRUNCATED) {
+               priv->stats.rx_truncate_errors++;
+       }
+
+       /* Let hardware know we've replenished one buffer */
+       rx_pi++;
+
+       /* Ensure completion of all writes before notifying HW of replenish */
+       wmb();
+       writeq(rx_pi, priv->base + MLXBF_GIGE_RX_WQE_PI);
+
+       (*rx_pkts)++;
+
+       rx_pi_rem = rx_pi % priv->rx_q_entries;
+       if (rx_pi_rem == 0)
+               priv->valid_polarity ^= 1;
+       rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI);
+       rx_ci_rem = rx_ci % priv->rx_q_entries;
+
+       if (skb)
+               netif_receive_skb(skb);
+
+       return rx_pi_rem != rx_ci_rem;
+}
+
+/* Driver poll() function called by NAPI infrastructure */
+int mlxbf_gige_poll(struct napi_struct *napi, int budget)
+{
+       struct mlxbf_gige *priv;
+       bool remaining_pkts;
+       int work_done = 0;
+       u64 data;
+
+       priv = container_of(napi, struct mlxbf_gige, napi);
+
+       mlxbf_gige_handle_tx_complete(priv);
+
+       do {
+               remaining_pkts = mlxbf_gige_rx_packet(priv, &work_done);
+       } while (remaining_pkts && work_done < budget);
+
+       /* If amount of work done < budget, turn off NAPI polling
+        * via napi_complete_done(napi, work_done) and then
+        * re-enable interrupts.
+        */
+       if (work_done < budget && napi_complete_done(napi, work_done)) {
+               /* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
+                * indicate receive readiness
+                */
+               data = readq(priv->base + MLXBF_GIGE_INT_MASK);
+               data &= ~MLXBF_GIGE_INT_MASK_RX_RECEIVE_PACKET;
+               writeq(data, priv->base + MLXBF_GIGE_INT_MASK);
+       }
+
+       return work_done;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_tx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_tx.c
new file mode 100644 (file)
index 0000000..04982e8
--- /dev/null
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+
+/* Packet transmit logic for Mellanox Gigabit Ethernet driver
+ *
+ * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
+ */
+
+#include <linux/skbuff.h>
+
+#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
+
+/* Transmit Initialization
+ * 1) Allocates TX WQE array using coherent DMA mapping
+ * 2) Allocates TX completion counter using coherent DMA mapping
+ */
+int mlxbf_gige_tx_init(struct mlxbf_gige *priv)
+{
+       size_t size;
+
+       size = MLXBF_GIGE_TX_WQE_SZ * priv->tx_q_entries;
+       priv->tx_wqe_base = dma_alloc_coherent(priv->dev, size,
+                                              &priv->tx_wqe_base_dma,
+                                              GFP_KERNEL);
+       if (!priv->tx_wqe_base)
+               return -ENOMEM;
+
+       priv->tx_wqe_next = priv->tx_wqe_base;
+
+       /* Write TX WQE base address into MMIO reg */
+       writeq(priv->tx_wqe_base_dma, priv->base + MLXBF_GIGE_TX_WQ_BASE);
+
+       /* Allocate address for TX completion count */
+       priv->tx_cc = dma_alloc_coherent(priv->dev, MLXBF_GIGE_TX_CC_SZ,
+                                        &priv->tx_cc_dma, GFP_KERNEL);
+       if (!priv->tx_cc) {
+               dma_free_coherent(priv->dev, size,
+                                 priv->tx_wqe_base, priv->tx_wqe_base_dma);
+               return -ENOMEM;
+       }
+
+       /* Write TX CC base address into MMIO reg */
+       writeq(priv->tx_cc_dma, priv->base + MLXBF_GIGE_TX_CI_UPDATE_ADDRESS);
+
+       writeq(ilog2(priv->tx_q_entries),
+              priv->base + MLXBF_GIGE_TX_WQ_SIZE_LOG2);
+
+       priv->prev_tx_ci = 0;
+       priv->tx_pi = 0;
+
+       return 0;
+}
+
+/* Transmit Deinitialization
+ * This routine will free allocations done by mlxbf_gige_tx_init(),
+ * namely the TX WQE array and the TX completion counter
+ */
+void mlxbf_gige_tx_deinit(struct mlxbf_gige *priv)
+{
+       u64 *tx_wqe_addr;
+       size_t size;
+       int i;
+
+       tx_wqe_addr = priv->tx_wqe_base;
+
+       for (i = 0; i < priv->tx_q_entries; i++) {
+               if (priv->tx_skb[i]) {
+                       dma_unmap_single(priv->dev, *tx_wqe_addr,
+                                        priv->tx_skb[i]->len, DMA_TO_DEVICE);
+                       dev_kfree_skb(priv->tx_skb[i]);
+                       priv->tx_skb[i] = NULL;
+               }
+               tx_wqe_addr += 2;
+       }
+
+       size = MLXBF_GIGE_TX_WQE_SZ * priv->tx_q_entries;
+       dma_free_coherent(priv->dev, size,
+                         priv->tx_wqe_base, priv->tx_wqe_base_dma);
+
+       dma_free_coherent(priv->dev, MLXBF_GIGE_TX_CC_SZ,
+                         priv->tx_cc, priv->tx_cc_dma);
+
+       priv->tx_wqe_base = NULL;
+       priv->tx_wqe_base_dma = 0;
+       priv->tx_cc = NULL;
+       priv->tx_cc_dma = 0;
+       priv->tx_wqe_next = NULL;
+       writeq(0, priv->base + MLXBF_GIGE_TX_WQ_BASE);
+       writeq(0, priv->base + MLXBF_GIGE_TX_CI_UPDATE_ADDRESS);
+}
+
+/* Function that returns status of TX ring:
+ *          0: TX ring is full, i.e. there are no
+ *             unused entries left in the TX ring.
+ *   non-zero: TX ring is not full, i.e. there are
+ *             some available entries in the TX ring.
+ *             The non-zero value is a measure of
+ *             how many TX entries are available, but
+ *             it is not the exact number of available
+ *             entries (see below).
+ *
+ * The algorithm makes the assumption that if
+ * (prev_tx_ci == tx_pi) then the TX ring is empty.
+ * An empty ring actually has (tx_q_entries-1)
+ * entries, which allows the algorithm to differentiate
+ * the case of an empty ring vs. a full ring.
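+ *
+ * For example, with tx_q_entries = 128, prev_tx_ci = 100 and
+ * tx_pi = 110, ten WQEs are in flight and the routine returns
+ * ((128 + 100 - 110) % 128) - 1 = 117 available entries.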
+ */
+static u16 mlxbf_gige_tx_buffs_avail(struct mlxbf_gige *priv)
+{
+       unsigned long flags;
+       u16 avail;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       if (priv->prev_tx_ci == priv->tx_pi)
+               avail = priv->tx_q_entries - 1;
+       else
+               avail = ((priv->tx_q_entries + priv->prev_tx_ci - priv->tx_pi)
+                         % priv->tx_q_entries) - 1;
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       return avail;
+}
+
+bool mlxbf_gige_handle_tx_complete(struct mlxbf_gige *priv)
+{
+       struct net_device_stats *stats;
+       u16 tx_wqe_index;
+       u64 *tx_wqe_addr;
+       u64 tx_status;
+       u16 tx_ci;
+
+       tx_status = readq(priv->base + MLXBF_GIGE_TX_STATUS);
+       if (tx_status & MLXBF_GIGE_TX_STATUS_DATA_FIFO_FULL)
+               priv->stats.tx_fifo_full++;
+       tx_ci = readq(priv->base + MLXBF_GIGE_TX_CONSUMER_INDEX);
+       stats = &priv->netdev->stats;
+
+       /* Transmit completion logic needs to loop until the completion
+        * index (in SW) equals TX consumer index (from HW).  These
+        * parameters are unsigned 16-bit values and the wrap case needs
+        * to be supported, that is TX consumer index wrapped from 0xFFFF
+        * to 0 while TX completion index is still < 0xFFFF.
+        */
+       for (; priv->prev_tx_ci != tx_ci; priv->prev_tx_ci++) {
+               tx_wqe_index = priv->prev_tx_ci % priv->tx_q_entries;
+               /* Each TX WQE is 16 bytes: the first 64-bit word holds
+                * the 2KB TX buffer address and the second 64-bit word
+                * holds metadata about the TX WQE (e.g. packet length).
+                */
+               tx_wqe_addr = priv->tx_wqe_base +
+                              (tx_wqe_index * MLXBF_GIGE_TX_WQE_SZ_QWORDS);
+
+               stats->tx_packets++;
+               stats->tx_bytes += MLXBF_GIGE_TX_WQE_PKT_LEN(tx_wqe_addr);
+
+               dma_unmap_single(priv->dev, *tx_wqe_addr,
+                                priv->tx_skb[tx_wqe_index]->len, DMA_TO_DEVICE);
+               dev_consume_skb_any(priv->tx_skb[tx_wqe_index]);
+               priv->tx_skb[tx_wqe_index] = NULL;
+
+               /* Ensure completion of updates across all cores */
+               mb();
+       }
+
+       /* Since the TX ring was likely just drained, check whether the
+        * TX queue had previously been stopped; now that TX buffers are
+        * available again, the TX queue can be awakened.
+        */
+       if (netif_queue_stopped(priv->netdev) &&
+           mlxbf_gige_tx_buffs_avail(priv))
+               netif_wake_queue(priv->netdev);
+
+       return true;
+}
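
The 16-bit wrap that the completion loop above tolerates is easy to see in isolation. A minimal sketch with hypothetical index values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint16_t prev_ci = 0xFFFE;      /* SW completion index */
        uint16_t tx_ci = 0x0002;        /* HW consumer index, already wrapped */
        unsigned int completed = 0;

        /* Same loop shape as the driver: prev_ci overflows from 0xFFFF
         * to 0 naturally, so entries 0xFFFE, 0xFFFF, 0 and 1 are all
         * processed before the indices meet.
         */
        for (; prev_ci != tx_ci; prev_ci++)
                completed++;

        assert(completed == 4);
        return 0;
}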
+
+/* Function to advance the tx_wqe_next pointer to next TX WQE */
+void mlxbf_gige_update_tx_wqe_next(struct mlxbf_gige *priv)
+{
+       /* Advance tx_wqe_next pointer */
+       priv->tx_wqe_next += MLXBF_GIGE_TX_WQE_SZ_QWORDS;
+
+       /* If 'next' pointer is beyond end of TX ring, wrap it
+        * back to the 'base' pointer of the ring
+        */
+       if (priv->tx_wqe_next == (priv->tx_wqe_base +
+                                 (priv->tx_q_entries * MLXBF_GIGE_TX_WQE_SZ_QWORDS)))
+               priv->tx_wqe_next = priv->tx_wqe_base;
+}
+
+netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
+                                 struct net_device *netdev)
+{
+       struct mlxbf_gige *priv = netdev_priv(netdev);
+       long buff_addr, start_dma_page, end_dma_page;
+       struct sk_buff *tx_skb;
+       dma_addr_t tx_buf_dma;
+       unsigned long flags;
+       u64 *tx_wqe_addr;
+       u64 word2;
+
+       /* Hardware DMA expects a single contiguous buffer, so drop the
+        * SKB if it is oversized or cannot be linearized
+        */
+       if (skb->len > MLXBF_GIGE_DEFAULT_BUF_SZ || skb_linearize(skb)) {
+               dev_kfree_skb(skb);
+               netdev->stats.tx_dropped++;
+               return NETDEV_TX_OK;
+       }
+
+       buff_addr = (long)skb->data;
+       start_dma_page = buff_addr >> MLXBF_GIGE_DMA_PAGE_SHIFT;
+       end_dma_page   = (buff_addr + skb->len - 1) >> MLXBF_GIGE_DMA_PAGE_SHIFT;
+
+       /* Verify that the payload pointer and data length of the SKB
+        * to be transmitted do not violate the hardware DMA limitation.
+        */
+       if (start_dma_page != end_dma_page) {
+               /* DMA operation would fail as-is, alloc new aligned SKB */
+               tx_skb = mlxbf_gige_alloc_skb(priv, skb->len,
+                                             &tx_buf_dma, DMA_TO_DEVICE);
+               if (!tx_skb) {
+                       /* Free original skb, could not alloc new aligned SKB */
+                       dev_kfree_skb(skb);
+                       netdev->stats.tx_dropped++;
+                       return NETDEV_TX_OK;
+               }
+
+               skb_put_data(tx_skb, skb->data, skb->len);
+
+               /* Free the original SKB */
+               dev_kfree_skb(skb);
+       } else {
+               tx_skb = skb;
+               tx_buf_dma = dma_map_single(priv->dev, skb->data,
+                                           skb->len, DMA_TO_DEVICE);
+               if (dma_mapping_error(priv->dev, tx_buf_dma)) {
+                       dev_kfree_skb(skb);
+                       netdev->stats.tx_dropped++;
+                       return NETDEV_TX_OK;
+               }
+       }
+
+       /* Get address of TX WQE */
+       tx_wqe_addr = priv->tx_wqe_next;
+
+       mlxbf_gige_update_tx_wqe_next(priv);
+
+       /* Put the buffer's DMA address into first 64-bit word of TX WQE */
+       *tx_wqe_addr = tx_buf_dma;
+
+       /* Set TX WQE pkt_len appropriately
+        * NOTE: GigE silicon will automatically pad up to
+        *       minimum packet length if needed.
+        */
+       word2 = tx_skb->len & MLXBF_GIGE_TX_WQE_PKT_LEN_MASK;
+
+       /* Write entire 2nd word of TX WQE */
+       *(tx_wqe_addr + 1) = word2;
+
+       spin_lock_irqsave(&priv->lock, flags);
+       priv->tx_skb[priv->tx_pi % priv->tx_q_entries] = tx_skb;
+       priv->tx_pi++;
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       if (!netdev_xmit_more()) {
+               /* Create memory barrier before write to TX PI */
+               wmb();
+               writeq(priv->tx_pi, priv->base + MLXBF_GIGE_TX_PRODUCER_INDEX);
+       }
+
+       /* Check if the last TX entry was just used */
+       if (!mlxbf_gige_tx_buffs_avail(priv)) {
+               /* TX ring is full, inform stack */
+               netif_stop_queue(netdev);
+
+               /* Since there is no separate "TX complete" interrupt,
+                * NAPI poll must be scheduled explicitly.  This triggers
+                * the logic that processes TX completions and should
+                * drain the TX ring, allowing the TX queue to be
+                * awakened.
+                */
+               napi_schedule(&priv->napi);
+       }
+
+       return NETDEV_TX_OK;
+}
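
The start_dma_page/end_dma_page test above is the usual check for a buffer straddling a DMA boundary: the first and last byte of the payload must fall in the same page. A standalone sketch, with a hypothetical 4 KiB page shift standing in for MLXBF_GIGE_DMA_PAGE_SHIFT:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define DMA_PAGE_SHIFT 12               /* hypothetical: 4 KiB DMA pages */

static bool crosses_dma_page(uintptr_t buf, size_t len)
{
        return (buf >> DMA_PAGE_SHIFT) !=
               ((buf + len - 1) >> DMA_PAGE_SHIFT);
}

int main(void)
{
        assert(!crosses_dma_page(0x1000, 0x1000)); /* exactly one page */
        assert(crosses_dma_page(0x1800, 0x1000));  /* straddles 0x2000 */
        return 0;
}

When the test fails, the driver falls back to copying the payload into a freshly allocated, suitably aligned SKB rather than rejecting the packet outright.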
index b3ca5bd..3713c45 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/kernel.h>
 #include <linux/err.h>
+#include <linux/ethtool.h>
 #include <linux/sfp.h>
 
 #include "core.h"
@@ -25,8 +26,8 @@ struct mlxsw_env {
 static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
                                          bool *qsfp, bool *cmis)
 {
-       char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
        char mcia_pl[MLXSW_REG_MCIA_LEN];
+       char *eeprom_tmp;
        u8 ident;
        int err;
 
@@ -35,7 +36,7 @@ static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
        err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl);
        if (err)
                return err;
-       mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
+       eeprom_tmp = mlxsw_reg_mcia_eeprom_data(mcia_pl);
        ident = eeprom_tmp[0];
        *cmis = false;
        switch (ident) {
@@ -63,8 +64,8 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
                              u16 offset, u16 size, void *data,
                              bool qsfp, unsigned int *p_read_size)
 {
-       char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
        char mcia_pl[MLXSW_REG_MCIA_LEN];
+       char *eeprom_tmp;
        u16 i2c_addr;
        u8 page = 0;
        int status;
@@ -115,7 +116,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
        if (status)
                return -EIO;
 
-       mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
+       eeprom_tmp = mlxsw_reg_mcia_eeprom_data(mcia_pl);
        memcpy(data, eeprom_tmp, size);
        *p_read_size = size;
 
@@ -126,13 +127,13 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
                                         int off, int *temp)
 {
        unsigned int module_temp, module_crit, module_emerg;
-       char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
        union {
                u8 buf[MLXSW_REG_MCIA_TH_ITEM_SIZE];
                u16 temp;
        } temp_thresh;
        char mcia_pl[MLXSW_REG_MCIA_LEN] = {0};
        char mtmp_pl[MLXSW_REG_MTMP_LEN];
+       char *eeprom_tmp;
        bool qsfp, cmis;
        int page;
        int err;
@@ -194,7 +195,7 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
        if (err)
                return err;
 
-       mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
+       eeprom_tmp = mlxsw_reg_mcia_eeprom_data(mcia_pl);
        memcpy(temp_thresh.buf, eeprom_tmp, MLXSW_REG_MCIA_TH_ITEM_SIZE);
        *temp = temp_thresh.temp * 1000;
 
@@ -315,6 +316,79 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev,
 }
 EXPORT_SYMBOL(mlxsw_env_get_module_eeprom);
 
+static int mlxsw_env_mcia_status_process(const char *mcia_pl,
+                                        struct netlink_ext_ack *extack)
+{
+       u8 status = mlxsw_reg_mcia_status_get(mcia_pl);
+
+       switch (status) {
+       case MLXSW_REG_MCIA_STATUS_GOOD:
+               return 0;
+       case MLXSW_REG_MCIA_STATUS_NO_EEPROM_MODULE:
+               NL_SET_ERR_MSG_MOD(extack, "No response from module's EEPROM");
+               return -EIO;
+       case MLXSW_REG_MCIA_STATUS_MODULE_NOT_SUPPORTED:
+               NL_SET_ERR_MSG_MOD(extack, "Module type not supported by the device");
+               return -EOPNOTSUPP;
+       case MLXSW_REG_MCIA_STATUS_MODULE_NOT_CONNECTED:
+               NL_SET_ERR_MSG_MOD(extack, "No module present indication");
+               return -EIO;
+       case MLXSW_REG_MCIA_STATUS_I2C_ERROR:
+               NL_SET_ERR_MSG_MOD(extack, "Error occurred while trying to access module's EEPROM using I2C");
+               return -EIO;
+       case MLXSW_REG_MCIA_STATUS_MODULE_DISABLED:
+               NL_SET_ERR_MSG_MOD(extack, "Module is disabled");
+               return -EIO;
+       default:
+               NL_SET_ERR_MSG_MOD(extack, "Unknown error");
+               return -EIO;
+       }
+}
+
+int
+mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
+                                   const struct ethtool_module_eeprom *page,
+                                   struct netlink_ext_ack *extack)
+{
+       u32 bytes_read = 0;
+       u16 device_addr;
+
+       /* Offset cannot be larger than 2 * ETH_MODULE_EEPROM_PAGE_LEN */
+       device_addr = page->offset;
+
+       while (bytes_read < page->length) {
+               char mcia_pl[MLXSW_REG_MCIA_LEN];
+               char *eeprom_tmp;
+               u8 size;
+               int err;
+
+               size = min_t(u8, page->length - bytes_read,
+                            MLXSW_REG_MCIA_EEPROM_SIZE);
+
+               mlxsw_reg_mcia_pack(mcia_pl, module, 0, page->page,
+                                   device_addr + bytes_read, size,
+                                   page->i2c_address);
+               mlxsw_reg_mcia_bank_number_set(mcia_pl, page->bank);
+
+               err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcia), mcia_pl);
+               if (err) {
+                       NL_SET_ERR_MSG_MOD(extack, "Failed to access module's EEPROM");
+                       return err;
+               }
+
+               err = mlxsw_env_mcia_status_process(mcia_pl, extack);
+               if (err)
+                       return err;
+
+               eeprom_tmp = mlxsw_reg_mcia_eeprom_data(mcia_pl);
+               memcpy(page->data + bytes_read, eeprom_tmp, size);
+               bytes_read += size;
+       }
+
+       return bytes_read;
+}
+EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page);
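
The loop above is a plain bounded-chunk read: the MCIA register moves at most MLXSW_REG_MCIA_EEPROM_SIZE bytes per query (48, per the register description further down), so the requested page length is split into chunks. A generic sketch of the same pattern, with a hypothetical read_chunk() callback standing in for the register query:

#include <stddef.h>

#define CHUNK_MAX 48    /* stands in for MLXSW_REG_MCIA_EEPROM_SIZE */

/* Hypothetical transport hook: read 'len' bytes at 'offset', < 0 on error */
typedef int (*read_chunk_fn)(unsigned int offset, unsigned char *buf,
                             unsigned char len);

static int read_all(read_chunk_fn read_chunk, unsigned int offset,
                    unsigned char *data, unsigned int length)
{
        unsigned int bytes_read = 0;

        while (bytes_read < length) {
                unsigned char size = length - bytes_read < CHUNK_MAX ?
                                     length - bytes_read : CHUNK_MAX;
                int err = read_chunk(offset + bytes_read,
                                     data + bytes_read, size);

                if (err < 0)
                        return err;     /* propagate transport error */
                bytes_read += size;
        }
        return bytes_read;      /* like the driver: byte count on success */
}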
+
 static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core,
                                            u8 module,
                                            bool *p_has_temp_sensor)
index 2b23f8a..0bf5bd0 100644 (file)
@@ -4,6 +4,8 @@
 #ifndef _MLXSW_CORE_ENV_H
 #define _MLXSW_CORE_ENV_H
 
+#include <linux/ethtool.h>
+
 struct ethtool_modinfo;
 struct ethtool_eeprom;
 
@@ -18,6 +20,11 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev,
                                struct ethtool_eeprom *ee, u8 *data);
 
 int
+mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
+                                   const struct ethtool_module_eeprom *page,
+                                   struct netlink_ext_ack *extack);
+
+int
 mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
                                      u64 *p_counter);
 int mlxsw_env_init(struct mlxsw_core *core, struct mlxsw_env **p_env);
index 677a53f..0998dcc 100644 (file)
@@ -719,7 +719,8 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
                                                        MLXSW_THERMAL_TRIP_MASK,
                                                        module_tz,
                                                        &mlxsw_thermal_module_ops,
-                                                       NULL, 0, 0);
+                                                       NULL, 0,
+                                                       module_tz->parent->polling_delay);
        if (IS_ERR(module_tz->tzdev)) {
                err = PTR_ERR(module_tz->tzdev);
                return err;
@@ -848,7 +849,8 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
                                                MLXSW_THERMAL_TRIP_MASK,
                                                gearbox_tz,
                                                &mlxsw_thermal_gearbox_ops,
-                                               NULL, 0, 0);
+                                               NULL, 0,
+                                               gearbox_tz->parent->polling_delay);
        if (IS_ERR(gearbox_tz->tzdev))
                return PTR_ERR(gearbox_tz->tzdev);
 
index 6810272..d9d56c4 100644 (file)
@@ -112,10 +112,23 @@ mlxsw_m_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee,
                                           ee, data);
 }
 
+static int
+mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev,
+                                 const struct ethtool_module_eeprom *page,
+                                 struct netlink_ext_ack *extack)
+{
+       struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+       struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+       return mlxsw_env_get_module_eeprom_by_page(core, mlxsw_m_port->module,
+                                                  page, extack);
+}
+
 static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
        .get_drvinfo            = mlxsw_m_module_get_drvinfo,
        .get_module_info        = mlxsw_m_get_module_info,
        .get_module_eeprom      = mlxsw_m_get_module_eeprom,
+       .get_module_eeprom_by_page = mlxsw_m_get_module_eeprom_by_page,
 };
 
 static int
index 5304309..6fbda6e 100644 (file)
@@ -3907,7 +3907,7 @@ MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6);
 #define MLXSW_REG_QEEC_HIGHEST_SHAPER_BS       25
 #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1    5
 #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2    11
-#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3    5
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3    11
 
 static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
                                       enum mlxsw_reg_qeec_hr hr, u8 index,
@@ -9690,6 +9690,20 @@ MLXSW_ITEM32(reg, mcia, l, 0x00, 31, 1);
  */
 MLXSW_ITEM32(reg, mcia, module, 0x00, 16, 8);
 
+enum {
+       MLXSW_REG_MCIA_STATUS_GOOD = 0,
+       /* No response from module's EEPROM. */
+       MLXSW_REG_MCIA_STATUS_NO_EEPROM_MODULE = 1,
+       /* Module type not supported by the device. */
+       MLXSW_REG_MCIA_STATUS_MODULE_NOT_SUPPORTED = 2,
+       /* No module present indication. */
+       MLXSW_REG_MCIA_STATUS_MODULE_NOT_CONNECTED = 3,
+       /* Error occurred while trying to access module's EEPROM using I2C. */
+       MLXSW_REG_MCIA_STATUS_I2C_ERROR = 9,
+       /* Module is disabled. */
+       MLXSW_REG_MCIA_STATUS_MODULE_DISABLED = 16,
+};
+
 /* reg_mcia_status
  * Module status.
  * Access: RO
@@ -9714,6 +9728,12 @@ MLXSW_ITEM32(reg, mcia, page_number, 0x04, 16, 8);
  */
 MLXSW_ITEM32(reg, mcia, device_address, 0x04, 0, 16);
 
+/* reg_mcia_bank_number
+ * Bank number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcia, bank_number, 0x08, 16, 8);
+
 /* reg_mcia_size
  * Number of bytes to read/write (up to 48 bytes).
  * Access: RW
index c8061be..267590a 100644 (file)
@@ -1051,6 +1051,19 @@ static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
 }
 
 static int
+mlxsw_sp_get_module_eeprom_by_page(struct net_device *dev,
+                                  const struct ethtool_module_eeprom *page,
+                                  struct netlink_ext_ack *extack)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       u8 module = mlxsw_sp_port->mapping.module;
+
+       return mlxsw_env_get_module_eeprom_by_page(mlxsw_sp->core, module, page,
+                                                  extack);
+}
+
+static int
 mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info)
 {
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
@@ -1199,6 +1212,7 @@ const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
        .set_link_ksettings             = mlxsw_sp_port_set_link_ksettings,
        .get_module_info                = mlxsw_sp_get_module_info,
        .get_module_eeprom              = mlxsw_sp_get_module_eeprom,
+       .get_module_eeprom_by_page      = mlxsw_sp_get_module_eeprom_by_page,
        .get_ts_info                    = mlxsw_sp_get_ts_info,
        .get_eth_phy_stats              = mlxsw_sp_get_eth_phy_stats,
        .get_eth_mac_stats              = mlxsw_sp_get_eth_mac_stats,
index 04672eb..9958d50 100644 (file)
@@ -1332,6 +1332,7 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
                           u8 band, u32 child_handle)
 {
        struct mlxsw_sp_qdisc *old_qdisc;
+       u32 parent;
 
        if (band < mlxsw_sp_qdisc->num_classes &&
            mlxsw_sp_qdisc->qdiscs[band].handle == child_handle)
@@ -1352,7 +1353,9 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
        if (old_qdisc)
                mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
 
-       mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc, band);
+       parent = TC_H_MAKE(mlxsw_sp_qdisc->handle, band + 1);
+       mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc,
+                                                        parent);
        if (!WARN_ON(!mlxsw_sp_qdisc))
                mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
 
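
For reference, the parent handle composed above follows the standard TC convention: a 32-bit handle with the qdisc's major number in the upper 16 bits and the class identifier (here band + 1) in the lower 16. A small sketch of the composition, using the mask values from the pkt_sched UAPI header:

#include <assert.h>
#include <stdint.h>

#define TC_H_MAJ_MASK 0xFFFF0000U
#define TC_H_MIN_MASK 0x0000FFFFU
#define TC_H_MAKE(maj, min) (((maj) & TC_H_MAJ_MASK) | ((min) & TC_H_MIN_MASK))

int main(void)
{
        uint32_t handle = 0x10000;      /* qdisc "1:" */

        /* band 0 maps to class "1:1", band 7 to "1:8" */
        assert(TC_H_MAKE(handle, 0 + 1) == 0x10001);
        assert(TC_H_MAKE(handle, 7 + 1) == 0x10008);
        return 0;
}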
index 0cfba29..c5ef9aa 100644 (file)
@@ -898,7 +898,7 @@ mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
        return 0;
 }
 
-static int mlxsw_sp_port_attr_set(struct net_device *dev,
+static int mlxsw_sp_port_attr_set(struct net_device *dev, const void *ctx,
                                  const struct switchdev_attr *attr,
                                  struct netlink_ext_ack *extack)
 {
@@ -1766,7 +1766,7 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
        }
 }
 
-static int mlxsw_sp_port_obj_add(struct net_device *dev,
+static int mlxsw_sp_port_obj_add(struct net_device *dev, const void *ctx,
                                 const struct switchdev_obj *obj,
                                 struct netlink_ext_ack *extack)
 {
@@ -1916,7 +1916,7 @@ mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
        }
 }
 
-static int mlxsw_sp_port_obj_del(struct net_device *dev,
+static int mlxsw_sp_port_obj_del(struct net_device *dev, const void *ctx,
                                 const struct switchdev_obj *obj)
 {
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
index d0f6dfe..d54aa16 100644 (file)
@@ -54,4 +54,6 @@ config LAN743X
          To compile this driver as a module, choose M here. The module will be
          called lan743x.
 
+source "drivers/net/ethernet/microchip/sparx5/Kconfig"
+
 endif # NET_VENDOR_MICROCHIP
index da60354..c77dc03 100644 (file)
@@ -8,3 +8,5 @@ obj-$(CONFIG_ENCX24J600) += encx24j600.o encx24j600-regmap.o
 obj-$(CONFIG_LAN743X) += lan743x.o
 
 lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o
+
+obj-$(CONFIG_SPARX5_SWITCH) += sparx5/
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
new file mode 100644 (file)
index 0000000..a80419d
--- /dev/null
@@ -0,0 +1,9 @@
+config SPARX5_SWITCH
+       tristate "Sparx5 switch driver"
+       depends on NET_SWITCHDEV
+       depends on HAS_IOMEM
+       select PHYLINK
+       select PHY_SPARX5_SERDES
+       select RESET_CONTROLLER
+       help
+         This driver supports the Sparx5 network switch device.
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
new file mode 100644 (file)
index 0000000..faa8f07
--- /dev/null
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip Sparx5 network device drivers.
+#
+
+obj-$(CONFIG_SPARX5_SWITCH) += sparx5-switch.o
+
+sparx5-switch-objs  := sparx5_main.o sparx5_packet.o \
+ sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \
+ sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
new file mode 100644 (file)
index 0000000..76a8bb5
--- /dev/null
@@ -0,0 +1,596 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+/* QSYS calendar information */
+#define SPX5_PORTS_PER_CALREG          10  /* Ports mapped in a calendar register */
+#define SPX5_CALBITS_PER_PORT          3   /* Bits per port in calendar register */
+
+/* DSM calendar information */
+#define SPX5_DSM_CAL_LEN               64
+#define SPX5_DSM_CAL_EMPTY             0xFFFF
+#define SPX5_DSM_CAL_MAX_DEVS_PER_TAXI 13
+#define SPX5_DSM_CAL_TAXIS             8
+#define SPX5_DSM_CAL_BW_LOSS           553
+
+#define SPX5_TAXI_PORT_MAX             70
+
+#define SPEED_12500                    12500
+
+/* Maps from taxis to port numbers */
+static u32 sparx5_taxi_ports[SPX5_DSM_CAL_TAXIS][SPX5_DSM_CAL_MAX_DEVS_PER_TAXI] = {
+       {57, 12, 0, 1, 2, 16, 17, 18, 19, 20, 21, 22, 23},
+       {58, 13, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31},
+       {59, 14, 6, 7, 8, 32, 33, 34, 35, 36, 37, 38, 39},
+       {60, 15, 9, 10, 11, 40, 41, 42, 43, 44, 45, 46, 47},
+       {61, 48, 49, 50, 99, 99, 99, 99, 99, 99, 99, 99, 99},
+       {62, 51, 52, 53, 99, 99, 99, 99, 99, 99, 99, 99, 99},
+       {56, 63, 54, 55, 99, 99, 99, 99, 99, 99, 99, 99, 99},
+       {64, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99},
+};
+
+struct sparx5_calendar_data {
+       u32 schedule[SPX5_DSM_CAL_LEN];
+       u32 avg_dist[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+       u32 taxi_ports[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+       u32 taxi_speeds[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+       u32 dev_slots[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+       u32 new_slots[SPX5_DSM_CAL_LEN];
+       u32 temp_sched[SPX5_DSM_CAL_LEN];
+       u32 indices[SPX5_DSM_CAL_LEN];
+       u32 short_list[SPX5_DSM_CAL_LEN];
+       u32 long_list[SPX5_DSM_CAL_LEN];
+};
+
+static u32 sparx5_target_bandwidth(struct sparx5 *sparx5)
+{
+       switch (sparx5->target_ct) {
+       case SPX5_TARGET_CT_7546:
+       case SPX5_TARGET_CT_7546TSN:
+               return 65000;
+       case SPX5_TARGET_CT_7549:
+       case SPX5_TARGET_CT_7549TSN:
+               return 91000;
+       case SPX5_TARGET_CT_7552:
+       case SPX5_TARGET_CT_7552TSN:
+               return 129000;
+       case SPX5_TARGET_CT_7556:
+       case SPX5_TARGET_CT_7556TSN:
+               return 161000;
+       case SPX5_TARGET_CT_7558:
+       case SPX5_TARGET_CT_7558TSN:
+               return 201000;
+       default:
+               return 0;
+       }
+}
+
+/* Port speed encoding used in calendar configuration */
+enum sparx5_cal_bw {
+       SPX5_CAL_SPEED_NONE = 0,
+       SPX5_CAL_SPEED_1G   = 1,
+       SPX5_CAL_SPEED_2G5  = 2,
+       SPX5_CAL_SPEED_5G   = 3,
+       SPX5_CAL_SPEED_10G  = 4,
+       SPX5_CAL_SPEED_25G  = 5,
+       SPX5_CAL_SPEED_0G5  = 6,
+       SPX5_CAL_SPEED_12G5 = 7
+};
+
+static u32 sparx5_clk_to_bandwidth(enum sparx5_core_clockfreq cclock)
+{
+       switch (cclock) {
+       case SPX5_CORE_CLOCK_250MHZ: return 83000; /* 250000 / 3 */
+       case SPX5_CORE_CLOCK_500MHZ: return 166000; /* 500000 / 3 */
+       case SPX5_CORE_CLOCK_625MHZ: return 208000; /* 625000 / 3 */
+       default: return 0;
+       }
+       return 0;
+}
+
+static u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed)
+{
+       switch (speed) {
+       case SPX5_CAL_SPEED_1G:   return 1000;
+       case SPX5_CAL_SPEED_2G5:  return 2500;
+       case SPX5_CAL_SPEED_5G:   return 5000;
+       case SPX5_CAL_SPEED_10G:  return 10000;
+       case SPX5_CAL_SPEED_25G:  return 25000;
+       case SPX5_CAL_SPEED_0G5:  return 500;
+       case SPX5_CAL_SPEED_12G5: return 12500;
+       default: return 0;
+       }
+}
+
+static u32 sparx5_bandwidth_to_calendar(u32 bw)
+{
+       switch (bw) {
+       case SPEED_10:      return SPX5_CAL_SPEED_0G5;
+       case SPEED_100:     return SPX5_CAL_SPEED_0G5;
+       case SPEED_1000:    return SPX5_CAL_SPEED_1G;
+       case SPEED_2500:    return SPX5_CAL_SPEED_2G5;
+       case SPEED_5000:    return SPX5_CAL_SPEED_5G;
+       case SPEED_10000:   return SPX5_CAL_SPEED_10G;
+       case SPEED_12500:   return SPX5_CAL_SPEED_12G5;
+       case SPEED_25000:   return SPX5_CAL_SPEED_25G;
+       case SPEED_UNKNOWN: return SPX5_CAL_SPEED_1G;
+       default:            return SPX5_CAL_SPEED_NONE;
+       }
+}
+
+static enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5,
+                                                   u32 portno)
+{
+       struct sparx5_port *port;
+
+       if (portno >= SPX5_PORTS) {
+               /* Internal ports */
+               if (portno == SPX5_PORT_CPU_0 || portno == SPX5_PORT_CPU_1) {
+                       /* Equals 1.25G */
+                       return SPX5_CAL_SPEED_2G5;
+               } else if (portno == SPX5_PORT_VD0) {
+                       /* IPMC only idle BW */
+                       return SPX5_CAL_SPEED_NONE;
+               } else if (portno == SPX5_PORT_VD1) {
+                       /* OAM only idle BW */
+                       return SPX5_CAL_SPEED_NONE;
+               } else if (portno == SPX5_PORT_VD2) {
+                       /* IPinIP gets only idle BW */
+                       return SPX5_CAL_SPEED_NONE;
+               }
+               /* not in port map */
+               return SPX5_CAL_SPEED_NONE;
+       }
+       /* Front ports - may be used */
+       port = sparx5->ports[portno];
+       if (!port)
+               return SPX5_CAL_SPEED_NONE;
+       return sparx5_bandwidth_to_calendar(port->conf.bandwidth);
+}
+
+/* Auto configure the QSYS calendar based on port configuration */
+int sparx5_config_auto_calendar(struct sparx5 *sparx5)
+{
+       u32 cal[7], value, idx, portno;
+       u32 max_core_bw;
+       u32 total_bw = 0, used_port_bw = 0;
+       int err = 0;
+       enum sparx5_cal_bw spd;
+
+       memset(cal, 0, sizeof(cal));
+
+       max_core_bw = sparx5_clk_to_bandwidth(sparx5->coreclock);
+       if (max_core_bw == 0) {
+               dev_err(sparx5->dev, "Core clock not supported");
+               return -EINVAL;
+       }
+
+       /* Setup the calendar with the bandwidth to each port */
+       for (portno = 0; portno < SPX5_PORTS_ALL; portno++) {
+               u64 reg, offset, this_bw;
+
+               spd = sparx5_get_port_cal_speed(sparx5, portno);
+               if (spd == SPX5_CAL_SPEED_NONE)
+                       continue;
+
+               this_bw = sparx5_cal_speed_to_value(spd);
+               if (portno < SPX5_PORTS)
+                       used_port_bw += this_bw;
+               else
+                       /* Internal ports are granted half the value */
+                       this_bw = this_bw / 2;
+               total_bw += this_bw;
+               reg = portno;
+               offset = do_div(reg, SPX5_PORTS_PER_CALREG);
+               cal[reg] |= spd << (offset * SPX5_CALBITS_PER_PORT);
+       }
+
+       if (used_port_bw > sparx5_target_bandwidth(sparx5)) {
+               dev_err(sparx5->dev,
+                       "Port BW %u above target BW %u\n",
+                       used_port_bw, sparx5_target_bandwidth(sparx5));
+               return -EINVAL;
+       }
+
+       if (total_bw > max_core_bw) {
+               dev_err(sparx5->dev,
+                       "Total BW %u above switch core BW %u\n",
+                       total_bw, max_core_bw);
+               return -EINVAL;
+       }
+
+       /* Halt the calendar while changing it */
+       spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(10),
+                QSYS_CAL_CTRL_CAL_MODE,
+                sparx5, QSYS_CAL_CTRL);
+
+       /* Assign port bandwidth to auto calendar */
+       for (idx = 0; idx < ARRAY_SIZE(cal); idx++)
+               spx5_wr(cal[idx], sparx5, QSYS_CAL_AUTO(idx));
+
+       /* Increase grant rate of all ports to account for
+        * core clock ppm deviations
+        */
+       spx5_rmw(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_SET(671), /* 672->671 */
+                QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE,
+                sparx5,
+                QSYS_CAL_CTRL);
+
+       /* Grant idle usage to VD 0-2 */
+       for (idx = 2; idx < 5; idx++)
+               spx5_wr(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(12),
+                       sparx5,
+                       HSCH_OUTB_SHARE_ENA(idx));
+
+       /* Enable Auto mode */
+       spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(8),
+                QSYS_CAL_CTRL_CAL_MODE,
+                sparx5, QSYS_CAL_CTRL);
+
+       /* Verify successful calendar config */
+       value = spx5_rd(sparx5, QSYS_CAL_CTRL);
+       if (QSYS_CAL_CTRL_CAL_AUTO_ERROR_GET(value)) {
+               dev_err(sparx5->dev, "QSYS calendar error\n");
+               err = -EINVAL;
+       }
+       return err;
+}
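
The cal[] packing above fits SPX5_PORTS_PER_CALREG (ten) 3-bit speed codes into each calendar register. A standalone sketch of where one port's code lands (the driver uses do_div() for the same split; the values here are illustrative):

#include <assert.h>
#include <stdint.h>

#define PORTS_PER_CALREG 10     /* mirrors SPX5_PORTS_PER_CALREG */
#define CALBITS_PER_PORT 3      /* mirrors SPX5_CALBITS_PER_PORT */

int main(void)
{
        uint32_t cal[7] = { 0 };
        unsigned int portno = 23;               /* hypothetical front port */
        uint32_t spd = 4;                       /* e.g. SPX5_CAL_SPEED_10G */
        unsigned int reg = portno / PORTS_PER_CALREG;    /* -> cal[2] */
        unsigned int offset = portno % PORTS_PER_CALREG; /* -> slot 3 */

        cal[reg] |= spd << (offset * CALBITS_PER_PORT);
        assert(cal[2] == (4u << 9));            /* 3 slots of 3 bits = 9 */
        return 0;
}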
+
+static u32 sparx5_dsm_exb_gcd(u32 a, u32 b)
+{
+       if (b == 0)
+               return a;
+       return sparx5_dsm_exb_gcd(b, a % b);
+}
+
+static u32 sparx5_dsm_cal_len(u32 *cal)
+{
+       u32 idx = 0, len = 0;
+
+       while (idx < SPX5_DSM_CAL_LEN) {
+               if (cal[idx] != SPX5_DSM_CAL_EMPTY)
+                       len++;
+               idx++;
+       }
+       return len;
+}
+
+static u32 sparx5_dsm_cp_cal(u32 *sched)
+{
+       u32 idx = 0, tmp;
+
+       while (idx < SPX5_DSM_CAL_LEN) {
+               if (sched[idx] != SPX5_DSM_CAL_EMPTY) {
+                       tmp = sched[idx];
+                       sched[idx] = SPX5_DSM_CAL_EMPTY;
+                       return tmp;
+               }
+               idx++;
+       }
+       return SPX5_DSM_CAL_EMPTY;
+}
+
+static int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
+                                   struct sparx5_calendar_data *data)
+{
+       bool slow_mode;
+       u32 gcd, idx, sum, min, factor;
+       u32 num_of_slots, slot_spd, empty_slots;
+       u32 taxi_bw, clk_period_ps;
+
+       clk_period_ps = sparx5_clk_period(sparx5->coreclock);
+       taxi_bw = 128 * 1000000 / clk_period_ps;
+       slow_mode = !!(clk_period_ps > 2000);
+       memcpy(data->taxi_ports, &sparx5_taxi_ports[taxi],
+              sizeof(data->taxi_ports));
+
+       for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) {
+               data->new_slots[idx] = SPX5_DSM_CAL_EMPTY;
+               data->schedule[idx] = SPX5_DSM_CAL_EMPTY;
+               data->temp_sched[idx] = SPX5_DSM_CAL_EMPTY;
+       }
+       /* Default empty calendar */
+       data->schedule[0] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI;
+
+       /* Map ports to taxi positions */
+       for (idx = 0; idx < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; idx++) {
+               u32 portno = data->taxi_ports[idx];
+
+               if (portno < SPX5_TAXI_PORT_MAX) {
+                       data->taxi_speeds[idx] = sparx5_cal_speed_to_value
+                               (sparx5_get_port_cal_speed(sparx5, portno));
+               } else {
+                       data->taxi_speeds[idx] = 0;
+               }
+       }
+
+       sum = 0;
+       min = 25000;
+       for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) {
+               u32 jdx;
+
+               sum += data->taxi_speeds[idx];
+               if (data->taxi_speeds[idx] && data->taxi_speeds[idx] < min)
+                       min = data->taxi_speeds[idx];
+               gcd = min;
+               for (jdx = 0; jdx < ARRAY_SIZE(data->taxi_speeds); jdx++)
+                       gcd = sparx5_dsm_exb_gcd(gcd, data->taxi_speeds[jdx]);
+       }
+       if (sum == 0) /* Empty calendar */
+               return 0;
+       /* Make room for overhead traffic */
+       factor = 100 * 100 * 1000 / (100 * 100 - SPX5_DSM_CAL_BW_LOSS);
+
+       if (sum * factor > (taxi_bw * 1000)) {
+               dev_err(sparx5->dev,
+                       "Taxi %u, Requested BW %u above available BW %u\n",
+                       taxi, sum, taxi_bw);
+               return -EINVAL;
+       }
+       for (idx = 0; idx < 4; idx++) {
+               u32 raw_spd;
+
+               if (idx == 0)
+                       raw_spd = gcd / 5;
+               else if (idx == 1)
+                       raw_spd = gcd / 2;
+               else if (idx == 2)
+                       raw_spd = gcd;
+               else
+                       raw_spd = min;
+               slot_spd = raw_spd * factor / 1000;
+               num_of_slots = taxi_bw / slot_spd;
+               if (num_of_slots <= 64)
+                       break;
+       }
+
+       num_of_slots = num_of_slots > 64 ? 64 : num_of_slots;
+       slot_spd = taxi_bw / num_of_slots;
+
+       sum = 0;
+       for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) {
+               u32 spd = data->taxi_speeds[idx];
+               u32 adjusted_speed = data->taxi_speeds[idx] * factor / 1000;
+
+               if (adjusted_speed > 0) {
+                       data->avg_dist[idx] = (128 * 1000000 * 10) /
+                               (adjusted_speed * clk_period_ps);
+               } else {
+                       data->avg_dist[idx] = -1;
+               }
+               data->dev_slots[idx] = ((spd * factor / slot_spd) + 999) / 1000;
+               if (spd != 25000 && (spd != 10000 || !slow_mode)) {
+                       if (num_of_slots < (5 * data->dev_slots[idx])) {
+                               dev_err(sparx5->dev,
+                                       "Taxi %u, speed %u, Low slot sep.\n",
+                                       taxi, spd);
+                               return -EINVAL;
+                       }
+               }
+               sum += data->dev_slots[idx];
+               if (sum > num_of_slots) {
+                       dev_err(sparx5->dev,
+                               "Taxi %u with overhead factor %u\n",
+                               taxi, factor);
+                       return -EINVAL;
+               }
+       }
+
+       empty_slots = num_of_slots - sum;
+
+       for (idx = 0; idx < empty_slots; idx++)
+               data->schedule[idx] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI;
+
+       for (idx = 1; idx < num_of_slots; idx++) {
+               u32 indices_len = 0;
+               u32 slot, jdx, kdx, ts;
+               s32 cnt;
+               u32 num_of_old_slots, num_of_new_slots, tgt_score;
+
+               for (slot = 0; slot < ARRAY_SIZE(data->dev_slots); slot++) {
+                       if (data->dev_slots[slot] == idx) {
+                               data->indices[indices_len] = slot;
+                               indices_len++;
+                       }
+               }
+               if (indices_len == 0)
+                       continue;
+               kdx = 0;
+               for (slot = 0; slot < idx; slot++) {
+                       for (jdx = 0; jdx < indices_len; jdx++, kdx++)
+                               data->new_slots[kdx] = data->indices[jdx];
+               }
+
+               for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
+                       if (data->schedule[slot] == SPX5_DSM_CAL_EMPTY)
+                               break;
+               }
+
+               num_of_old_slots = slot;
+               num_of_new_slots = kdx;
+               cnt = 0;
+               ts = 0;
+
+               if (num_of_new_slots > num_of_old_slots) {
+                       memcpy(data->short_list, data->schedule,
+                              sizeof(data->short_list));
+                       memcpy(data->long_list, data->new_slots,
+                              sizeof(data->long_list));
+                       tgt_score = 100000 * num_of_old_slots /
+                               num_of_new_slots;
+               } else {
+                       memcpy(data->short_list, data->new_slots,
+                              sizeof(data->short_list));
+                       memcpy(data->long_list, data->schedule,
+                              sizeof(data->long_list));
+                       tgt_score = 100000 * num_of_new_slots /
+                               num_of_old_slots;
+               }
+
+               while (sparx5_dsm_cal_len(data->short_list) > 0 ||
+                      sparx5_dsm_cal_len(data->long_list) > 0) {
+                       u32 act = 0;
+
+                       if (sparx5_dsm_cal_len(data->short_list) > 0) {
+                               data->temp_sched[ts] =
+                                       sparx5_dsm_cp_cal(data->short_list);
+                               ts++;
+                               cnt += 100000;
+                               act = 1;
+                       }
+                       while (sparx5_dsm_cal_len(data->long_list) > 0 &&
+                              cnt > 0) {
+                               data->temp_sched[ts] =
+                                       sparx5_dsm_cp_cal(data->long_list);
+                               ts++;
+                               cnt -= tgt_score;
+                               act = 1;
+                       }
+                       if (act == 0) {
+                               dev_err(sparx5->dev,
+                                       "Error in DSM calendar calculation\n");
+                               return -EINVAL;
+                       }
+               }
+
+               for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
+                       if (data->temp_sched[slot] == SPX5_DSM_CAL_EMPTY)
+                               break;
+               }
+               for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
+                       data->schedule[slot] = data->temp_sched[slot];
+                       data->temp_sched[slot] = SPX5_DSM_CAL_EMPTY;
+                       data->new_slots[slot] = SPX5_DSM_CAL_EMPTY;
+               }
+       }
+       return 0;
+}
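
The short-list/long-list merge above is a credit scheme: every short-list slot deposits 100000 credits, and each long-list slot spends tgt_score of them, which spreads the two populations evenly over the schedule. A user-space sketch with made-up slot names:

#include <stdio.h>

int main(void)
{
        const char *shortl[] = { "A", "B" };
        const char *longl[]  = { "C", "D", "E", "F" };
        unsigned int si = 0, li = 0;
        int cnt = 0;
        int tgt_score = 100000 * 2 / 4; /* 100000 * short_len / long_len */

        while (si < 2 || li < 4) {
                int act = 0;

                if (si < 2) {
                        printf("%s ", shortl[si++]);
                        cnt += 100000;
                        act = 1;
                }
                while (li < 4 && cnt > 0) {
                        printf("%s ", longl[li++]);
                        cnt -= tgt_score;
                        act = 1;
                }
                if (!act)
                        break;  /* the driver treats a stall as an error */
        }
        printf("\n");           /* prints: A C D B E F */
        return 0;
}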
+
+static int sparx5_dsm_calendar_check(struct sparx5 *sparx5,
+                                    struct sparx5_calendar_data *data)
+{
+       u32 num_of_slots, idx, port;
+       int cnt, max_dist;
+       u32 slot_indices[SPX5_DSM_CAL_LEN], distances[SPX5_DSM_CAL_LEN];
+       u32 cal_length = sparx5_dsm_cal_len(data->schedule);
+
+       for (port = 0; port < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; port++) {
+               num_of_slots = 0;
+               max_dist = data->avg_dist[port];
+               for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) {
+                       slot_indices[idx] = SPX5_DSM_CAL_EMPTY;
+                       distances[idx] = SPX5_DSM_CAL_EMPTY;
+               }
+
+               for (idx = 0; idx < cal_length; idx++) {
+                       if (data->schedule[idx] == port) {
+                               slot_indices[num_of_slots] = idx;
+                               num_of_slots++;
+                       }
+               }
+
+               slot_indices[num_of_slots] = slot_indices[0] + cal_length;
+
+               for (idx = 0; idx < num_of_slots; idx++) {
+                       distances[idx] = (slot_indices[idx + 1] -
+                                         slot_indices[idx]) * 10;
+               }
+
+               for (idx = 0; idx < num_of_slots; idx++) {
+                       u32 jdx, kdx;
+
+                       cnt = distances[idx] - max_dist;
+                       if (cnt < 0)
+                               cnt = -cnt;
+                       kdx = 0;
+                       for (jdx = (idx + 1) % num_of_slots;
+                            jdx != idx;
+                            jdx = (jdx + 1) % num_of_slots, kdx++) {
+                               cnt = cnt + distances[jdx] - max_dist;
+                               if (cnt < 0)
+                                       cnt = -cnt;
+                               if (cnt > max_dist)
+                                       goto check_err;
+                       }
+               }
+       }
+       return 0;
+check_err:
+       dev_err(sparx5->dev,
+               "Port %u: distance %u above limit %d\n",
+               port, cnt, max_dist);
+       return -EINVAL;
+}
+
+static int sparx5_dsm_calendar_update(struct sparx5 *sparx5, u32 taxi,
+                                     struct sparx5_calendar_data *data)
+{
+       u32 idx;
+       u32 cal_len = sparx5_dsm_cal_len(data->schedule), len;
+
+       spx5_wr(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(1),
+               sparx5,
+               DSM_TAXI_CAL_CFG(taxi));
+       for (idx = 0; idx < cal_len; idx++) {
+               spx5_rmw(DSM_TAXI_CAL_CFG_CAL_IDX_SET(idx),
+                        DSM_TAXI_CAL_CFG_CAL_IDX,
+                        sparx5,
+                        DSM_TAXI_CAL_CFG(taxi));
+               spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_VAL_SET(data->schedule[idx]),
+                        DSM_TAXI_CAL_CFG_CAL_PGM_VAL,
+                        sparx5,
+                        DSM_TAXI_CAL_CFG(taxi));
+       }
+       spx5_wr(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(0),
+               sparx5,
+               DSM_TAXI_CAL_CFG(taxi));
+       len = DSM_TAXI_CAL_CFG_CAL_CUR_LEN_GET(spx5_rd(sparx5,
+                                                      DSM_TAXI_CAL_CFG(taxi)));
+       if (len != cal_len - 1)
+               goto update_err;
+       return 0;
+update_err:
+       dev_err(sparx5->dev, "Incorrect calendar length: %u\n", len);
+       return -EINVAL;
+}
+
+/* Configure the DSM calendar based on port configuration */
+int sparx5_config_dsm_calendar(struct sparx5 *sparx5)
+{
+       int taxi;
+       struct sparx5_calendar_data *data;
+       int err = 0;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       for (taxi = 0; taxi < SPX5_DSM_CAL_TAXIS; ++taxi) {
+               err = sparx5_dsm_calendar_calc(sparx5, taxi, data);
+               if (err) {
+                       dev_err(sparx5->dev, "DSM calendar calculation failed\n");
+                       goto cal_out;
+               }
+               err = sparx5_dsm_calendar_check(sparx5, data);
+               if (err) {
+                       dev_err(sparx5->dev, "DSM calendar check failed\n");
+                       goto cal_out;
+               }
+               err = sparx5_dsm_calendar_update(sparx5, taxi, data);
+               if (err) {
+                       dev_err(sparx5->dev, "DSM calendar update failed\n");
+                       goto cal_out;
+               }
+       }
+cal_out:
+       kfree(data);
+       return err;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
new file mode 100644 (file)
index 0000000..59783fc
--- /dev/null
@@ -0,0 +1,1227 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/ethtool.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+
+/* Index of ANA_AC port counters */
+#define SPX5_PORT_POLICER_DROPS 0
+
+/* Add a potentially wrapping 32-bit value to a 64-bit counter */
+static void sparx5_update_counter(u64 *cnt, u32 val)
+{
+       if (val < (*cnt & U32_MAX))
+               *cnt += (u64)1 << 32; /* value has wrapped */
+       *cnt = (*cnt & ~(u64)U32_MAX) + val;
+}
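
This is the standard fold of a free-running 32-bit hardware counter into a 64-bit software counter: if the freshly read value is below the low half of the accumulator, the hardware must have rolled over, so a carry is added to the upper half. A standalone check with an illustrative value:

#include <assert.h>
#include <stdint.h>

static void update_counter(uint64_t *cnt, uint32_t val)
{
        if (val < (*cnt & UINT32_MAX))
                *cnt += (uint64_t)1 << 32;      /* 32-bit rollover */
        *cnt = (*cnt & ~(uint64_t)UINT32_MAX) + val;
}

int main(void)
{
        uint64_t cnt = 0xFFFFFFF0;      /* close to the 32-bit limit */

        update_counter(&cnt, 0x10);     /* HW wrapped: 0xFFFFFFF0 -> 0x10 */
        assert(cnt == 0x100000010ULL);
        return 0;
}

This assumes the counters are polled at least once per 2^32 increments; a double rollover between reads is indistinguishable from a single one.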
+
+enum sparx5_stats_entry {
+       spx5_stats_rx_symbol_err_cnt = 0,
+       spx5_stats_pmac_rx_symbol_err_cnt = 1,
+       spx5_stats_tx_uc_cnt = 2,
+       spx5_stats_pmac_tx_uc_cnt = 3,
+       spx5_stats_tx_mc_cnt = 4,
+       spx5_stats_tx_bc_cnt = 5,
+       spx5_stats_tx_backoff1_cnt = 6,
+       spx5_stats_tx_multi_coll_cnt = 7,
+       spx5_stats_rx_uc_cnt = 8,
+       spx5_stats_pmac_rx_uc_cnt = 9,
+       spx5_stats_rx_mc_cnt = 10,
+       spx5_stats_rx_bc_cnt = 11,
+       spx5_stats_rx_crc_err_cnt = 12,
+       spx5_stats_pmac_rx_crc_err_cnt = 13,
+       spx5_stats_rx_alignment_lost_cnt = 14,
+       spx5_stats_pmac_rx_alignment_lost_cnt = 15,
+       spx5_stats_tx_ok_bytes_cnt = 16,
+       spx5_stats_pmac_tx_ok_bytes_cnt = 17,
+       spx5_stats_tx_defer_cnt = 18,
+       spx5_stats_tx_late_coll_cnt = 19,
+       spx5_stats_tx_xcoll_cnt = 20,
+       spx5_stats_tx_csense_cnt = 21,
+       spx5_stats_rx_ok_bytes_cnt = 22,
+       spx5_stats_pmac_rx_ok_bytes_cnt = 23,
+       spx5_stats_pmac_tx_mc_cnt = 24,
+       spx5_stats_pmac_tx_bc_cnt = 25,
+       spx5_stats_tx_xdefer_cnt = 26,
+       spx5_stats_pmac_rx_mc_cnt = 27,
+       spx5_stats_pmac_rx_bc_cnt = 28,
+       spx5_stats_rx_in_range_len_err_cnt = 29,
+       spx5_stats_pmac_rx_in_range_len_err_cnt = 30,
+       spx5_stats_rx_out_of_range_len_err_cnt = 31,
+       spx5_stats_pmac_rx_out_of_range_len_err_cnt = 32,
+       spx5_stats_rx_oversize_cnt = 33,
+       spx5_stats_pmac_rx_oversize_cnt = 34,
+       spx5_stats_tx_pause_cnt = 35,
+       spx5_stats_pmac_tx_pause_cnt = 36,
+       spx5_stats_rx_pause_cnt = 37,
+       spx5_stats_pmac_rx_pause_cnt = 38,
+       spx5_stats_rx_unsup_opcode_cnt = 39,
+       spx5_stats_pmac_rx_unsup_opcode_cnt = 40,
+       spx5_stats_rx_undersize_cnt = 41,
+       spx5_stats_pmac_rx_undersize_cnt = 42,
+       spx5_stats_rx_fragments_cnt = 43,
+       spx5_stats_pmac_rx_fragments_cnt = 44,
+       spx5_stats_rx_jabbers_cnt = 45,
+       spx5_stats_pmac_rx_jabbers_cnt = 46,
+       spx5_stats_rx_size64_cnt = 47,
+       spx5_stats_pmac_rx_size64_cnt = 48,
+       spx5_stats_rx_size65to127_cnt = 49,
+       spx5_stats_pmac_rx_size65to127_cnt = 50,
+       spx5_stats_rx_size128to255_cnt = 51,
+       spx5_stats_pmac_rx_size128to255_cnt = 52,
+       spx5_stats_rx_size256to511_cnt = 53,
+       spx5_stats_pmac_rx_size256to511_cnt = 54,
+       spx5_stats_rx_size512to1023_cnt = 55,
+       spx5_stats_pmac_rx_size512to1023_cnt = 56,
+       spx5_stats_rx_size1024to1518_cnt = 57,
+       spx5_stats_pmac_rx_size1024to1518_cnt = 58,
+       spx5_stats_rx_size1519tomax_cnt = 59,
+       spx5_stats_pmac_rx_size1519tomax_cnt = 60,
+       spx5_stats_tx_size64_cnt = 61,
+       spx5_stats_pmac_tx_size64_cnt = 62,
+       spx5_stats_tx_size65to127_cnt = 63,
+       spx5_stats_pmac_tx_size65to127_cnt = 64,
+       spx5_stats_tx_size128to255_cnt = 65,
+       spx5_stats_pmac_tx_size128to255_cnt = 66,
+       spx5_stats_tx_size256to511_cnt = 67,
+       spx5_stats_pmac_tx_size256to511_cnt = 68,
+       spx5_stats_tx_size512to1023_cnt = 69,
+       spx5_stats_pmac_tx_size512to1023_cnt = 70,
+       spx5_stats_tx_size1024to1518_cnt = 71,
+       spx5_stats_pmac_tx_size1024to1518_cnt = 72,
+       spx5_stats_tx_size1519tomax_cnt = 73,
+       spx5_stats_pmac_tx_size1519tomax_cnt = 74,
+       spx5_stats_mm_rx_assembly_err_cnt = 75,
+       spx5_stats_mm_rx_assembly_ok_cnt = 76,
+       spx5_stats_mm_rx_merge_frag_cnt = 77,
+       spx5_stats_mm_rx_smd_err_cnt = 78,
+       spx5_stats_mm_tx_pfragment_cnt = 79,
+       spx5_stats_rx_bad_bytes_cnt = 80,
+       spx5_stats_pmac_rx_bad_bytes_cnt = 81,
+       spx5_stats_rx_in_bytes_cnt = 82,
+       spx5_stats_rx_ipg_shrink_cnt = 83,
+       spx5_stats_rx_sync_lost_err_cnt = 84,
+       spx5_stats_rx_tagged_frms_cnt = 85,
+       spx5_stats_rx_untagged_frms_cnt = 86,
+       spx5_stats_tx_out_bytes_cnt = 87,
+       spx5_stats_tx_tagged_frms_cnt = 88,
+       spx5_stats_tx_untagged_frms_cnt = 89,
+       spx5_stats_rx_hih_cksm_err_cnt = 90,
+       spx5_stats_pmac_rx_hih_cksm_err_cnt = 91,
+       spx5_stats_rx_xgmii_prot_err_cnt = 92,
+       spx5_stats_pmac_rx_xgmii_prot_err_cnt = 93,
+       spx5_stats_ana_ac_port_stat_lsb_cnt = 94,
+       spx5_stats_green_p0_rx_fwd = 95,
+       spx5_stats_green_p0_rx_port_drop = 111,
+       spx5_stats_green_p0_tx_port = 127,
+       spx5_stats_rx_local_drop = 143,
+       spx5_stats_tx_local_drop = 144,
+       spx5_stats_count = 145,
+};
+
+static const char *const sparx5_stats_layout[] = {
+       "mm_rx_assembly_err_cnt",
+       "mm_rx_assembly_ok_cnt",
+       "mm_rx_merge_frag_cnt",
+       "mm_rx_smd_err_cnt",
+       "mm_tx_pfragment_cnt",
+       "rx_bad_bytes_cnt",
+       "pmac_rx_bad_bytes_cnt",
+       "rx_in_bytes_cnt",
+       "rx_ipg_shrink_cnt",
+       "rx_sync_lost_err_cnt",
+       "rx_tagged_frms_cnt",
+       "rx_untagged_frms_cnt",
+       "tx_out_bytes_cnt",
+       "tx_tagged_frms_cnt",
+       "tx_untagged_frms_cnt",
+       "rx_hih_cksm_err_cnt",
+       "pmac_rx_hih_cksm_err_cnt",
+       "rx_xgmii_prot_err_cnt",
+       "pmac_rx_xgmii_prot_err_cnt",
+       "rx_port_policer_drop",
+       "rx_fwd_green_p0",
+       "rx_fwd_green_p1",
+       "rx_fwd_green_p2",
+       "rx_fwd_green_p3",
+       "rx_fwd_green_p4",
+       "rx_fwd_green_p5",
+       "rx_fwd_green_p6",
+       "rx_fwd_green_p7",
+       "rx_fwd_yellow_p0",
+       "rx_fwd_yellow_p1",
+       "rx_fwd_yellow_p2",
+       "rx_fwd_yellow_p3",
+       "rx_fwd_yellow_p4",
+       "rx_fwd_yellow_p5",
+       "rx_fwd_yellow_p6",
+       "rx_fwd_yellow_p7",
+       "rx_port_drop_green_p0",
+       "rx_port_drop_green_p1",
+       "rx_port_drop_green_p2",
+       "rx_port_drop_green_p3",
+       "rx_port_drop_green_p4",
+       "rx_port_drop_green_p5",
+       "rx_port_drop_green_p6",
+       "rx_port_drop_green_p7",
+       "rx_port_drop_yellow_p0",
+       "rx_port_drop_yellow_p1",
+       "rx_port_drop_yellow_p2",
+       "rx_port_drop_yellow_p3",
+       "rx_port_drop_yellow_p4",
+       "rx_port_drop_yellow_p5",
+       "rx_port_drop_yellow_p6",
+       "rx_port_drop_yellow_p7",
+       "tx_port_green_p0",
+       "tx_port_green_p1",
+       "tx_port_green_p2",
+       "tx_port_green_p3",
+       "tx_port_green_p4",
+       "tx_port_green_p5",
+       "tx_port_green_p6",
+       "tx_port_green_p7",
+       "tx_port_yellow_p0",
+       "tx_port_yellow_p1",
+       "tx_port_yellow_p2",
+       "tx_port_yellow_p3",
+       "tx_port_yellow_p4",
+       "tx_port_yellow_p5",
+       "tx_port_yellow_p6",
+       "tx_port_yellow_p7",
+       "rx_local_drop",
+       "tx_local_drop",
+};
+
+static void sparx5_get_queue_sys_stats(struct sparx5 *sparx5, int portno)
+{
+       u64 *portstats;
+       u64 *stats;
+       u32 addr;
+       int idx;
+
+       portstats = &sparx5->stats[portno * sparx5->num_stats];
+       mutex_lock(&sparx5->queue_stats_lock);
+       spx5_wr(XQS_STAT_CFG_STAT_VIEW_SET(portno), sparx5, XQS_STAT_CFG);
+       addr = 0;
+       stats = &portstats[spx5_stats_green_p0_rx_fwd];
+       for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++addr, ++stats)
+               sparx5_update_counter(stats, spx5_rd(sparx5, XQS_CNT(addr)));
+       addr = 16;
+       stats = &portstats[spx5_stats_green_p0_rx_port_drop];
+       for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++addr, ++stats)
+               sparx5_update_counter(stats, spx5_rd(sparx5, XQS_CNT(addr)));
+       addr = 256;
+       stats = &portstats[spx5_stats_green_p0_tx_port];
+       for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++addr, ++stats)
+               sparx5_update_counter(stats, spx5_rd(sparx5, XQS_CNT(addr)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_local_drop],
+                             spx5_rd(sparx5, XQS_CNT(32)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_local_drop],
+                             spx5_rd(sparx5, XQS_CNT(272)));
+       mutex_unlock(&sparx5->queue_stats_lock);
+}
+
+static void sparx5_get_ana_ac_stats_stats(struct sparx5 *sparx5, int portno)
+{
+       u64 *portstats = &sparx5->stats[portno * sparx5->num_stats];
+
+       sparx5_update_counter(&portstats[spx5_stats_ana_ac_port_stat_lsb_cnt],
+                             spx5_rd(sparx5, ANA_AC_PORT_STAT_LSB_CNT(portno,
+                                                                      SPX5_PORT_POLICER_DROPS)));
+}
+
+static void sparx5_get_dev_phy_stats(u64 *portstats, void __iomem *inst,
+                                    u32 tinst)
+{
+       sparx5_update_counter(&portstats[spx5_stats_rx_symbol_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_SYMBOL_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_symbol_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_SYMBOL_ERR_CNT(tinst)));
+}
+
+static void sparx5_get_dev_mac_stats(u64 *portstats, void __iomem *inst,
+                                    u32 tinst)
+{
+       sparx5_update_counter(&portstats[spx5_stats_tx_uc_cnt],
+                             spx5_inst_rd(inst, DEV5G_TX_UC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_uc_cnt],
+                             spx5_inst_rd(inst, DEV5G_PMAC_TX_UC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_mc_cnt],
+                             spx5_inst_rd(inst, DEV5G_TX_MC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_bc_cnt],
+                             spx5_inst_rd(inst, DEV5G_TX_BC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_uc_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_UC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_uc_cnt],
+                             spx5_inst_rd(inst, DEV5G_PMAC_RX_UC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_mc_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_MC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_bc_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_BC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_crc_err_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_CRC_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_crc_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_CRC_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_alignment_lost_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_ALIGNMENT_LOST_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_alignment_lost_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_ok_bytes_cnt],
+                             spx5_inst_rd(inst, DEV5G_TX_OK_BYTES_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_ok_bytes_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_TX_OK_BYTES_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_ok_bytes_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_OK_BYTES_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_ok_bytes_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_OK_BYTES_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_mc_cnt],
+                             spx5_inst_rd(inst, DEV5G_PMAC_TX_MC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_bc_cnt],
+                             spx5_inst_rd(inst, DEV5G_PMAC_TX_BC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_mc_cnt],
+                             spx5_inst_rd(inst, DEV5G_PMAC_RX_MC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bc_cnt],
+                             spx5_inst_rd(inst, DEV5G_PMAC_RX_BC_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_in_range_len_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_IN_RANGE_LEN_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_in_range_len_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_IN_RANGE_LEN_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_out_of_range_len_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_OVERSIZE_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_OVERSIZE_CNT(tinst)));
+}
+
+static void sparx5_get_dev_mac_ctrl_stats(u64 *portstats, void __iomem *inst,
+                                         u32 tinst)
+{
+       sparx5_update_counter(&portstats[spx5_stats_tx_pause_cnt],
+                             spx5_inst_rd(inst, DEV5G_TX_PAUSE_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_pause_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_TX_PAUSE_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_pause_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_PAUSE_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_pause_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_PAUSE_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_unsup_opcode_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_UNSUP_OPCODE_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_unsup_opcode_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(tinst)));
+}
+
+static void sparx5_get_dev_rmon_stats(u64 *portstats, void __iomem *inst,
+                                      u32 tinst)
+{
+       sparx5_update_counter(&portstats[spx5_stats_rx_undersize_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_UNDERSIZE_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_undersize_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_UNDERSIZE_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_OVERSIZE_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_OVERSIZE_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_fragments_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_FRAGMENTS_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_fragments_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_FRAGMENTS_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_jabbers_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_JABBERS_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_jabbers_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_JABBERS_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size64_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_SIZE64_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size64_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_SIZE64_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size65to127_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_SIZE65TO127_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size65to127_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_SIZE65TO127_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size128to255_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_SIZE128TO255_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size128to255_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_SIZE128TO255_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size256to511_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_SIZE256TO511_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size256to511_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_SIZE256TO511_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size512to1023_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_SIZE512TO1023_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size512to1023_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_SIZE512TO1023_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size1024to1518_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_SIZE1024TO1518_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1024to1518_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_SIZE1024TO1518_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size1519tomax_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_SIZE1519TOMAX_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1519tomax_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size64_cnt],
+                             spx5_inst_rd(inst, DEV5G_TX_SIZE64_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size64_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_TX_SIZE64_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size65to127_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_TX_SIZE65TO127_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size65to127_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_TX_SIZE65TO127_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size128to255_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_TX_SIZE128TO255_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size128to255_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_TX_SIZE128TO255_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size256to511_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_TX_SIZE256TO511_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size256to511_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_TX_SIZE256TO511_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size512to1023_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_TX_SIZE512TO1023_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size512to1023_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_TX_SIZE512TO1023_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size1024to1518_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_TX_SIZE1024TO1518_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1024to1518_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_TX_SIZE1024TO1518_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size1519tomax_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_TX_SIZE1519TOMAX_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1519tomax_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(tinst)));
+}
+
+static void sparx5_get_dev_misc_stats(u64 *portstats, void __iomem *inst,
+                                      u32 tinst)
+{
+       sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_MM_RX_ASSEMBLY_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_ok_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_MM_RX_ASSEMBLY_OK_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_mm_rx_merge_frag_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_MM_RX_MERGE_FRAG_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_mm_rx_smd_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_MM_RX_SMD_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_mm_tx_pfragment_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_MM_TX_PFRAGMENT_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_bad_bytes_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_BAD_BYTES_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bad_bytes_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_BAD_BYTES_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_in_bytes_cnt],
+                             spx5_inst_rd(inst, DEV5G_RX_IN_BYTES_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_ipg_shrink_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_IPG_SHRINK_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_tagged_frms_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_TAGGED_FRMS_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_untagged_frms_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_UNTAGGED_FRMS_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_out_bytes_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_TX_OUT_BYTES_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_tagged_frms_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_TX_TAGGED_FRMS_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_untagged_frms_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_TX_UNTAGGED_FRMS_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_hih_cksm_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_HIH_CKSM_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_hih_cksm_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_xgmii_prot_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_RX_XGMII_PROT_ERR_CNT(tinst)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_xgmii_prot_err_cnt],
+                             spx5_inst_rd(inst,
+                                          DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(tinst)));
+}
+
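+/* Ports running a BaseR mode keep their counters in the port device
+ * target (DEV5G-style register layout, see sparx5_to_high_dev()); all
+ * other ports are counted in the ASM block via sparx5_get_asm_stats().
+ */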
+static void sparx5_get_device_stats(struct sparx5 *sparx5, int portno)
+{
+       u64 *portstats = &sparx5->stats[portno * sparx5->num_stats];
+       u32 tinst = sparx5_port_dev_index(portno);
+       u32 dev = sparx5_to_high_dev(portno);
+       void __iomem *inst;
+
+       inst = spx5_inst_get(sparx5, dev, tinst);
+       sparx5_get_dev_phy_stats(portstats, inst, tinst);
+       sparx5_get_dev_mac_stats(portstats, inst, tinst);
+       sparx5_get_dev_mac_ctrl_stats(portstats, inst, tinst);
+       sparx5_get_dev_rmon_stats(portstats, inst, tinst);
+       sparx5_get_dev_misc_stats(portstats, inst, tinst);
+}
+
+static void sparx5_get_asm_phy_stats(u64 *portstats, void __iomem *inst,
+                                    int portno)
+{
+       sparx5_update_counter(&portstats[spx5_stats_rx_symbol_err_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_SYMBOL_ERR_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_symbol_err_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_SYMBOL_ERR_CNT(portno)));
+}
+
+static void sparx5_get_asm_mac_stats(u64 *portstats, void __iomem *inst,
+                                    int portno)
+{
+       sparx5_update_counter(&portstats[spx5_stats_tx_uc_cnt],
+                             spx5_inst_rd(inst, ASM_TX_UC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_uc_cnt],
+                             spx5_inst_rd(inst, ASM_PMAC_TX_UC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_mc_cnt],
+                             spx5_inst_rd(inst, ASM_TX_MC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_bc_cnt],
+                             spx5_inst_rd(inst, ASM_TX_BC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_backoff1_cnt],
+                             spx5_inst_rd(inst, ASM_TX_BACKOFF1_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_multi_coll_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_TX_MULTI_COLL_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_uc_cnt],
+                             spx5_inst_rd(inst, ASM_RX_UC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_uc_cnt],
+                             spx5_inst_rd(inst, ASM_PMAC_RX_UC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_mc_cnt],
+                             spx5_inst_rd(inst, ASM_RX_MC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_bc_cnt],
+                             spx5_inst_rd(inst, ASM_RX_BC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_crc_err_cnt],
+                             spx5_inst_rd(inst, ASM_RX_CRC_ERR_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_crc_err_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_CRC_ERR_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_alignment_lost_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_ALIGNMENT_LOST_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_alignment_lost_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_ALIGNMENT_LOST_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_ok_bytes_cnt],
+                             spx5_inst_rd(inst, ASM_TX_OK_BYTES_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_ok_bytes_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_TX_OK_BYTES_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_defer_cnt],
+                             spx5_inst_rd(inst, ASM_TX_DEFER_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_late_coll_cnt],
+                             spx5_inst_rd(inst, ASM_TX_LATE_COLL_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_xcoll_cnt],
+                             spx5_inst_rd(inst, ASM_TX_XCOLL_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_csense_cnt],
+                             spx5_inst_rd(inst, ASM_TX_CSENSE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_ok_bytes_cnt],
+                             spx5_inst_rd(inst, ASM_RX_OK_BYTES_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_ok_bytes_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_OK_BYTES_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_mc_cnt],
+                             spx5_inst_rd(inst, ASM_PMAC_TX_MC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_bc_cnt],
+                             spx5_inst_rd(inst, ASM_PMAC_TX_BC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_xdefer_cnt],
+                             spx5_inst_rd(inst, ASM_TX_XDEFER_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_mc_cnt],
+                             spx5_inst_rd(inst, ASM_PMAC_RX_MC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bc_cnt],
+                             spx5_inst_rd(inst, ASM_PMAC_RX_BC_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_in_range_len_err_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_IN_RANGE_LEN_ERR_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_in_range_len_err_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_out_of_range_len_err_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
+                             spx5_inst_rd(inst, ASM_RX_OVERSIZE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_OVERSIZE_CNT(portno)));
+}
+
+static void sparx5_get_asm_mac_ctrl_stats(u64 *portstats, void __iomem *inst,
+                                         int portno)
+{
+       sparx5_update_counter(&portstats[spx5_stats_tx_pause_cnt],
+                             spx5_inst_rd(inst, ASM_TX_PAUSE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_pause_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_TX_PAUSE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_pause_cnt],
+                             spx5_inst_rd(inst, ASM_RX_PAUSE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_pause_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_PAUSE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_unsup_opcode_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_UNSUP_OPCODE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_unsup_opcode_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_UNSUP_OPCODE_CNT(portno)));
+}
+
+static void sparx5_get_asm_rmon_stats(u64 *portstats, void __iomem *inst,
+                                      int portno)
+{
+       sparx5_update_counter(&portstats[spx5_stats_rx_undersize_cnt],
+                             spx5_inst_rd(inst, ASM_RX_UNDERSIZE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_undersize_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_UNDERSIZE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_oversize_cnt],
+                             spx5_inst_rd(inst, ASM_RX_OVERSIZE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_oversize_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_OVERSIZE_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_fragments_cnt],
+                             spx5_inst_rd(inst, ASM_RX_FRAGMENTS_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_fragments_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_FRAGMENTS_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_jabbers_cnt],
+                             spx5_inst_rd(inst, ASM_RX_JABBERS_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_jabbers_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_JABBERS_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size64_cnt],
+                             spx5_inst_rd(inst, ASM_RX_SIZE64_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size64_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_SIZE64_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size65to127_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_SIZE65TO127_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size65to127_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_SIZE65TO127_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size128to255_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_SIZE128TO255_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size128to255_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_SIZE128TO255_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size256to511_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_SIZE256TO511_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size256to511_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_SIZE256TO511_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size512to1023_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_SIZE512TO1023_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size512to1023_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_SIZE512TO1023_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size1024to1518_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_SIZE1024TO1518_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1024to1518_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_SIZE1024TO1518_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_size1519tomax_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_SIZE1519TOMAX_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_size1519tomax_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_SIZE1519TOMAX_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size64_cnt],
+                             spx5_inst_rd(inst, ASM_TX_SIZE64_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size64_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_TX_SIZE64_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size65to127_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_TX_SIZE65TO127_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size65to127_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_TX_SIZE65TO127_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size128to255_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_TX_SIZE128TO255_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size128to255_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_TX_SIZE128TO255_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size256to511_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_TX_SIZE256TO511_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size256to511_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_TX_SIZE256TO511_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size512to1023_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_TX_SIZE512TO1023_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size512to1023_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_TX_SIZE512TO1023_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size1024to1518_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_TX_SIZE1024TO1518_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1024to1518_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_TX_SIZE1024TO1518_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_size1519tomax_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_TX_SIZE1519TOMAX_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_tx_size1519tomax_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_TX_SIZE1519TOMAX_CNT(portno)));
+}
+
+static void sparx5_get_asm_misc_stats(u64 *portstats, void __iomem *inst,
+                                      int portno)
+{
+       sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_err_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_MM_RX_ASSEMBLY_ERR_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_mm_rx_assembly_ok_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_MM_RX_ASSEMBLY_OK_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_mm_rx_merge_frag_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_MM_RX_MERGE_FRAG_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_mm_rx_smd_err_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_MM_RX_SMD_ERR_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_mm_tx_pfragment_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_MM_TX_PFRAGMENT_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_bad_bytes_cnt],
+                             spx5_inst_rd(inst, ASM_RX_BAD_BYTES_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_pmac_rx_bad_bytes_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_PMAC_RX_BAD_BYTES_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_in_bytes_cnt],
+                             spx5_inst_rd(inst, ASM_RX_IN_BYTES_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_ipg_shrink_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_IPG_SHRINK_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_sync_lost_err_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_SYNC_LOST_ERR_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_tagged_frms_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_TAGGED_FRMS_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_rx_untagged_frms_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_RX_UNTAGGED_FRMS_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_out_bytes_cnt],
+                             spx5_inst_rd(inst, ASM_TX_OUT_BYTES_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_tagged_frms_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_TX_TAGGED_FRMS_CNT(portno)));
+       sparx5_update_counter(&portstats[spx5_stats_tx_untagged_frms_cnt],
+                             spx5_inst_rd(inst,
+                                          ASM_TX_UNTAGGED_FRMS_CNT(portno)));
+}
+
+static void sparx5_get_asm_stats(struct sparx5 *sparx5, int portno)
+{
+       u64 *portstats = &sparx5->stats[portno * sparx5->num_stats];
+       void __iomem *inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+
+       sparx5_get_asm_phy_stats(portstats, inst, portno);
+       sparx5_get_asm_mac_stats(portstats, inst, portno);
+       sparx5_get_asm_mac_ctrl_stats(portstats, inst, portno);
+       sparx5_get_asm_rmon_stats(portstats, inst, portno);
+       sparx5_get_asm_misc_stats(portstats, inst, portno);
+}
+
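+/* Frame size buckets reported to ethtool; entry n in this table
+ * describes hist[n] and hist_tx[n] as filled in by
+ * sparx5_get_eth_rmon_stats() below.
+ */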
+static const struct ethtool_rmon_hist_range sparx5_rmon_ranges[] = {
+       {    0,    64 },
+       {   65,   127 },
+       {  128,   255 },
+       {  256,   511 },
+       {  512,  1023 },
+       { 1024,  1518 },
+       { 1519, 10239 },
+       {}
+};
+
+static void sparx5_get_eth_phy_stats(struct net_device *ndev,
+                                    struct ethtool_eth_phy_stats *phy_stats)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+       struct sparx5 *sparx5 = port->sparx5;
+       int portno = port->portno;
+       void __iomem *inst;
+       u64 *portstats;
+
+       portstats = &sparx5->stats[portno * sparx5->num_stats];
+       if (sparx5_is_baser(port->conf.portmode)) {
+               u32 tinst = sparx5_port_dev_index(portno);
+               u32 dev = sparx5_to_high_dev(portno);
+
+               inst = spx5_inst_get(sparx5, dev, tinst);
+               sparx5_get_dev_phy_stats(portstats, inst, tinst);
+       } else {
+               inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+               sparx5_get_asm_phy_stats(portstats, inst, portno);
+       }
+       phy_stats->SymbolErrorDuringCarrier =
+               portstats[spx5_stats_rx_symbol_err_cnt] +
+               portstats[spx5_stats_pmac_rx_symbol_err_cnt];
+}
+
+static void sparx5_get_eth_mac_stats(struct net_device *ndev,
+                                    struct ethtool_eth_mac_stats *mac_stats)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+       struct sparx5 *sparx5 = port->sparx5;
+       int portno = port->portno;
+       void __iomem *inst;
+       u64 *portstats;
+
+       portstats = &sparx5->stats[portno * sparx5->num_stats];
+       if (sparx5_is_baser(port->conf.portmode)) {
+               u32 tinst = sparx5_port_dev_index(portno);
+               u32 dev = sparx5_to_high_dev(portno);
+
+               inst = spx5_inst_get(sparx5, dev, tinst);
+               sparx5_get_dev_mac_stats(portstats, inst, tinst);
+       } else {
+               inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+               sparx5_get_asm_mac_stats(portstats, inst, portno);
+       }
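+       /* Each reported IEEE counter sums the express MAC counter and
+        * its pmac_* counterpart (the preemptible MAC used with frame
+        * preemption), so preempted traffic is included.
+        */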
+       mac_stats->FramesTransmittedOK = portstats[spx5_stats_tx_uc_cnt] +
+               portstats[spx5_stats_pmac_tx_uc_cnt] +
+               portstats[spx5_stats_tx_mc_cnt] +
+               portstats[spx5_stats_tx_bc_cnt];
+       mac_stats->SingleCollisionFrames =
+               portstats[spx5_stats_tx_backoff1_cnt];
+       mac_stats->MultipleCollisionFrames =
+               portstats[spx5_stats_tx_multi_coll_cnt];
+       mac_stats->FramesReceivedOK = portstats[spx5_stats_rx_uc_cnt] +
+               portstats[spx5_stats_pmac_rx_uc_cnt] +
+               portstats[spx5_stats_rx_mc_cnt] +
+               portstats[spx5_stats_rx_bc_cnt];
+       mac_stats->FrameCheckSequenceErrors =
+               portstats[spx5_stats_rx_crc_err_cnt] +
+               portstats[spx5_stats_pmac_rx_crc_err_cnt];
+       mac_stats->AlignmentErrors = portstats[spx5_stats_rx_alignment_lost_cnt]
+               + portstats[spx5_stats_pmac_rx_alignment_lost_cnt];
+       mac_stats->OctetsTransmittedOK = portstats[spx5_stats_tx_ok_bytes_cnt] +
+               portstats[spx5_stats_pmac_tx_ok_bytes_cnt];
+       mac_stats->FramesWithDeferredXmissions =
+               portstats[spx5_stats_tx_defer_cnt];
+       mac_stats->LateCollisions =
+               portstats[spx5_stats_tx_late_coll_cnt];
+       mac_stats->FramesAbortedDueToXSColls =
+               portstats[spx5_stats_tx_xcoll_cnt];
+       mac_stats->CarrierSenseErrors = portstats[spx5_stats_tx_csense_cnt];
+       mac_stats->OctetsReceivedOK = portstats[spx5_stats_rx_ok_bytes_cnt] +
+               portstats[spx5_stats_pmac_rx_ok_bytes_cnt];
+       mac_stats->MulticastFramesXmittedOK = portstats[spx5_stats_tx_mc_cnt] +
+               portstats[spx5_stats_pmac_tx_mc_cnt];
+       mac_stats->BroadcastFramesXmittedOK = portstats[spx5_stats_tx_bc_cnt] +
+               portstats[spx5_stats_pmac_tx_bc_cnt];
+       mac_stats->FramesWithExcessiveDeferral =
+               portstats[spx5_stats_tx_xdefer_cnt];
+       mac_stats->MulticastFramesReceivedOK = portstats[spx5_stats_rx_mc_cnt] +
+               portstats[spx5_stats_pmac_rx_mc_cnt];
+       mac_stats->BroadcastFramesReceivedOK = portstats[spx5_stats_rx_bc_cnt] +
+               portstats[spx5_stats_pmac_rx_bc_cnt];
+       mac_stats->InRangeLengthErrors =
+               portstats[spx5_stats_rx_in_range_len_err_cnt] +
+               portstats[spx5_stats_pmac_rx_in_range_len_err_cnt];
+       mac_stats->OutOfRangeLengthField =
+               portstats[spx5_stats_rx_out_of_range_len_err_cnt] +
+               portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt];
+       mac_stats->FrameTooLongErrors = portstats[spx5_stats_rx_oversize_cnt] +
+               portstats[spx5_stats_pmac_rx_oversize_cnt];
+}
+
+static void sparx5_get_eth_mac_ctrl_stats(struct net_device *ndev,
+                                         struct ethtool_eth_ctrl_stats *mac_ctrl_stats)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+       struct sparx5 *sparx5 = port->sparx5;
+       int portno = port->portno;
+       void __iomem *inst;
+       u64 *portstats;
+
+       portstats = &sparx5->stats[portno * sparx5->num_stats];
+       if (sparx5_is_baser(port->conf.portmode)) {
+               u32 tinst = sparx5_port_dev_index(portno);
+               u32 dev = sparx5_to_high_dev(portno);
+
+               inst = spx5_inst_get(sparx5, dev, tinst);
+               sparx5_get_dev_mac_ctrl_stats(portstats, inst, tinst);
+       } else {
+               inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+               sparx5_get_asm_mac_ctrl_stats(portstats, inst, portno);
+       }
+       mac_ctrl_stats->MACControlFramesTransmitted =
+               portstats[spx5_stats_tx_pause_cnt] +
+               portstats[spx5_stats_pmac_tx_pause_cnt];
+       mac_ctrl_stats->MACControlFramesReceived =
+               portstats[spx5_stats_rx_pause_cnt] +
+               portstats[spx5_stats_pmac_rx_pause_cnt];
+       mac_ctrl_stats->UnsupportedOpcodesReceived =
+               portstats[spx5_stats_rx_unsup_opcode_cnt] +
+               portstats[spx5_stats_pmac_rx_unsup_opcode_cnt];
+}
+
+static void sparx5_get_eth_rmon_stats(struct net_device *ndev,
+                                     struct ethtool_rmon_stats *rmon_stats,
+                                     const struct ethtool_rmon_hist_range **ranges)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+       struct sparx5 *sparx5 = port->sparx5;
+       int portno = port->portno;
+       void __iomem *inst;
+       u64 *portstats;
+
+       portstats = &sparx5->stats[portno * sparx5->num_stats];
+       if (sparx5_is_baser(port->conf.portmode)) {
+               u32 tinst = sparx5_port_dev_index(portno);
+               u32 dev = sparx5_to_high_dev(portno);
+
+               inst = spx5_inst_get(sparx5, dev, tinst);
+               sparx5_get_dev_rmon_stats(portstats, inst, tinst);
+       } else {
+               inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+               sparx5_get_asm_rmon_stats(portstats, inst, portno);
+       }
+       rmon_stats->undersize_pkts = portstats[spx5_stats_rx_undersize_cnt] +
+               portstats[spx5_stats_pmac_rx_undersize_cnt];
+       rmon_stats->oversize_pkts = portstats[spx5_stats_rx_oversize_cnt] +
+               portstats[spx5_stats_pmac_rx_oversize_cnt];
+       rmon_stats->fragments = portstats[spx5_stats_rx_fragments_cnt] +
+               portstats[spx5_stats_pmac_rx_fragments_cnt];
+       rmon_stats->jabbers = portstats[spx5_stats_rx_jabbers_cnt] +
+               portstats[spx5_stats_pmac_rx_jabbers_cnt];
+       rmon_stats->hist[0] = portstats[spx5_stats_rx_size64_cnt] +
+               portstats[spx5_stats_pmac_rx_size64_cnt];
+       rmon_stats->hist[1] = portstats[spx5_stats_rx_size65to127_cnt] +
+               portstats[spx5_stats_pmac_rx_size65to127_cnt];
+       rmon_stats->hist[2] = portstats[spx5_stats_rx_size128to255_cnt] +
+               portstats[spx5_stats_pmac_rx_size128to255_cnt];
+       rmon_stats->hist[3] = portstats[spx5_stats_rx_size256to511_cnt] +
+               portstats[spx5_stats_pmac_rx_size256to511_cnt];
+       rmon_stats->hist[4] = portstats[spx5_stats_rx_size512to1023_cnt] +
+               portstats[spx5_stats_pmac_rx_size512to1023_cnt];
+       rmon_stats->hist[5] = portstats[spx5_stats_rx_size1024to1518_cnt] +
+               portstats[spx5_stats_pmac_rx_size1024to1518_cnt];
+       rmon_stats->hist[6] = portstats[spx5_stats_rx_size1519tomax_cnt] +
+               portstats[spx5_stats_pmac_rx_size1519tomax_cnt];
+       rmon_stats->hist_tx[0] = portstats[spx5_stats_tx_size64_cnt] +
+               portstats[spx5_stats_pmac_tx_size64_cnt];
+       rmon_stats->hist_tx[1] = portstats[spx5_stats_tx_size65to127_cnt] +
+               portstats[spx5_stats_pmac_tx_size65to127_cnt];
+       rmon_stats->hist_tx[2] = portstats[spx5_stats_tx_size128to255_cnt] +
+               portstats[spx5_stats_pmac_tx_size128to255_cnt];
+       rmon_stats->hist_tx[3] = portstats[spx5_stats_tx_size256to511_cnt] +
+               portstats[spx5_stats_pmac_tx_size256to511_cnt];
+       rmon_stats->hist_tx[4] = portstats[spx5_stats_tx_size512to1023_cnt] +
+               portstats[spx5_stats_pmac_tx_size512to1023_cnt];
+       rmon_stats->hist_tx[5] = portstats[spx5_stats_tx_size1024to1518_cnt] +
+               portstats[spx5_stats_pmac_tx_size1024to1518_cnt];
+       rmon_stats->hist_tx[6] = portstats[spx5_stats_tx_size1519tomax_cnt] +
+               portstats[spx5_stats_pmac_tx_size1519tomax_cnt];
+       *ranges = sparx5_rmon_ranges;
+}
+
+static int sparx5_get_sset_count(struct net_device *ndev, int sset)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+       struct sparx5 *sparx5 = port->sparx5;
+
+       if (sset != ETH_SS_STATS)
+               return -EOPNOTSUPP;
+       return sparx5->num_ethtool_stats;
+}
+
+static void sparx5_get_sset_strings(struct net_device *ndev, u32 sset, u8 *data)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+       struct sparx5 *sparx5 = port->sparx5;
+       int idx;
+
+       if (sset != ETH_SS_STATS)
+               return;
+
+       for (idx = 0; idx < sparx5->num_ethtool_stats; idx++)
+               strncpy(data + idx * ETH_GSTRING_LEN,
+                       sparx5->stats_layout[idx], ETH_GSTRING_LEN);
+}
+
+static void sparx5_get_sset_data(struct net_device *ndev,
+                                struct ethtool_stats *stats, u64 *data)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+       struct sparx5 *sparx5 = port->sparx5;
+       int portno = port->portno;
+       void __iomem *inst;
+       u64 *portstats;
+       int idx;
+
+       portstats = &sparx5->stats[portno * sparx5->num_stats];
+       if (sparx5_is_baser(port->conf.portmode)) {
+               u32 tinst = sparx5_port_dev_index(portno);
+               u32 dev = sparx5_to_high_dev(portno);
+
+               inst = spx5_inst_get(sparx5, dev, tinst);
+               sparx5_get_dev_misc_stats(portstats, inst, tinst);
+       } else {
+               inst = spx5_inst_get(sparx5, TARGET_ASM, 0);
+               sparx5_get_asm_misc_stats(portstats, inst, portno);
+       }
+       sparx5_get_ana_ac_stats_stats(sparx5, portno);
+       sparx5_get_queue_sys_stats(sparx5, portno);
+       /* Copy port counters to the ethtool buffer */
+       for (idx = spx5_stats_mm_rx_assembly_err_cnt;
+            idx < spx5_stats_mm_rx_assembly_err_cnt +
+            sparx5->num_ethtool_stats; idx++)
+               *data++ = portstats[idx];
+}
+
+void sparx5_get_stats64(struct net_device *ndev,
+                       struct rtnl_link_stats64 *stats)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+       struct sparx5 *sparx5 = port->sparx5;
+       u64 *portstats;
+       int idx;
+
+       if (!sparx5->stats)
+               return; /* Not initialized yet */
+
+       portstats = &sparx5->stats[port->portno * sparx5->num_stats];
+
+       stats->rx_packets = portstats[spx5_stats_rx_uc_cnt] +
+               portstats[spx5_stats_pmac_rx_uc_cnt] +
+               portstats[spx5_stats_rx_mc_cnt] +
+               portstats[spx5_stats_rx_bc_cnt];
+       stats->tx_packets = portstats[spx5_stats_tx_uc_cnt] +
+               portstats[spx5_stats_pmac_tx_uc_cnt] +
+               portstats[spx5_stats_tx_mc_cnt] +
+               portstats[spx5_stats_tx_bc_cnt];
+       stats->rx_bytes = portstats[spx5_stats_rx_ok_bytes_cnt] +
+               portstats[spx5_stats_pmac_rx_ok_bytes_cnt];
+       stats->tx_bytes = portstats[spx5_stats_tx_ok_bytes_cnt] +
+               portstats[spx5_stats_pmac_tx_ok_bytes_cnt];
+       stats->rx_errors = portstats[spx5_stats_rx_in_range_len_err_cnt] +
+               portstats[spx5_stats_pmac_rx_in_range_len_err_cnt] +
+               portstats[spx5_stats_rx_out_of_range_len_err_cnt] +
+               portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt] +
+               portstats[spx5_stats_rx_oversize_cnt] +
+               portstats[spx5_stats_pmac_rx_oversize_cnt] +
+               portstats[spx5_stats_rx_crc_err_cnt] +
+               portstats[spx5_stats_pmac_rx_crc_err_cnt] +
+               portstats[spx5_stats_rx_alignment_lost_cnt] +
+               portstats[spx5_stats_pmac_rx_alignment_lost_cnt];
+       stats->tx_errors = portstats[spx5_stats_tx_xcoll_cnt] +
+               portstats[spx5_stats_tx_csense_cnt] +
+               portstats[spx5_stats_tx_late_coll_cnt];
+       stats->multicast = portstats[spx5_stats_rx_mc_cnt] +
+               portstats[spx5_stats_pmac_rx_mc_cnt];
+       stats->collisions = portstats[spx5_stats_tx_late_coll_cnt] +
+               portstats[spx5_stats_tx_xcoll_cnt] +
+               portstats[spx5_stats_tx_backoff1_cnt];
+       stats->rx_length_errors = portstats[spx5_stats_rx_in_range_len_err_cnt] +
+               portstats[spx5_stats_pmac_rx_in_range_len_err_cnt] +
+               portstats[spx5_stats_rx_out_of_range_len_err_cnt] +
+               portstats[spx5_stats_pmac_rx_out_of_range_len_err_cnt] +
+               portstats[spx5_stats_rx_oversize_cnt] +
+               portstats[spx5_stats_pmac_rx_oversize_cnt];
+       stats->rx_crc_errors = portstats[spx5_stats_rx_crc_err_cnt] +
+               portstats[spx5_stats_pmac_rx_crc_err_cnt];
+       stats->rx_frame_errors = portstats[spx5_stats_rx_alignment_lost_cnt] +
+               portstats[spx5_stats_pmac_rx_alignment_lost_cnt];
+       stats->tx_aborted_errors = portstats[spx5_stats_tx_xcoll_cnt];
+       stats->tx_carrier_errors = portstats[spx5_stats_tx_csense_cnt];
+       stats->tx_window_errors = portstats[spx5_stats_tx_late_coll_cnt];
+       stats->rx_dropped = portstats[spx5_stats_ana_ac_port_stat_lsb_cnt];
+       /* Add the per-priority rx drop counters */
+       for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx)
+               stats->rx_dropped += portstats[spx5_stats_green_p0_rx_port_drop
+                                              + idx];
+       stats->tx_dropped = portstats[spx5_stats_tx_local_drop];
+}
+
+static void sparx5_update_port_stats(struct sparx5 *sparx5, int portno)
+{
+       if (sparx5_is_baser(sparx5->ports[portno]->conf.portmode))
+               sparx5_get_device_stats(sparx5, portno);
+       else
+               sparx5_get_asm_stats(sparx5, portno);
+       sparx5_get_ana_ac_stats_stats(sparx5, portno);
+       sparx5_get_queue_sys_stats(sparx5, portno);
+}
+
+static void sparx5_update_stats(struct sparx5 *sparx5)
+{
+       int idx;
+
+       for (idx = 0; idx < SPX5_PORTS; idx++)
+               if (sparx5->ports[idx])
+                       sparx5_update_port_stats(sparx5, idx);
+}
+
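+/* The delayed work re-queues itself, so all port counters are
+ * refreshed every SPX5_STATS_CHECK_DELAY even when no one is reading
+ * statistics.
+ */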
+static void sparx5_check_stats_work(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct sparx5 *sparx5 = container_of(dwork,
+                                            struct sparx5,
+                                            stats_work);
+
+       sparx5_update_stats(sparx5);
+
+       queue_delayed_work(sparx5->stats_queue, &sparx5->stats_work,
+                          SPX5_STATS_CHECK_DELAY);
+}
+
+static int sparx5_get_link_settings(struct net_device *ndev,
+                                   struct ethtool_link_ksettings *cmd)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+
+       return phylink_ethtool_ksettings_get(port->phylink, cmd);
+}
+
+static int sparx5_set_link_settings(struct net_device *ndev,
+                                   const struct ethtool_link_ksettings *cmd)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+
+       return phylink_ethtool_ksettings_set(port->phylink, cmd);
+}
+
+static void sparx5_config_stats(struct sparx5 *sparx5)
+{
+       /* Enable global events for port policer drops */
+       spx5_rmw(ANA_AC_PORT_SGE_CFG_MASK_SET(0xf0f0),
+                ANA_AC_PORT_SGE_CFG_MASK,
+                sparx5,
+                ANA_AC_PORT_SGE_CFG(SPX5_PORT_POLICER_DROPS));
+}
+
+static void sparx5_config_port_stats(struct sparx5 *sparx5, int portno)
+{
+       /* Clear Queue System counters */
+       spx5_wr(XQS_STAT_CFG_STAT_VIEW_SET(portno) |
+               XQS_STAT_CFG_STAT_CLEAR_SHOT_SET(3), sparx5,
+               XQS_STAT_CFG);
+
+       /* Use counter for port policer drop count */
+       spx5_rmw(ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE_SET(1) |
+                ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_SET(0) |
+                ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_SET(0xff),
+                ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE |
+                ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE |
+                ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK,
+                sparx5, ANA_AC_PORT_STAT_CFG(portno, SPX5_PORT_POLICER_DROPS));
+}
+
+const struct ethtool_ops sparx5_ethtool_ops = {
+       .get_sset_count         = sparx5_get_sset_count,
+       .get_strings            = sparx5_get_sset_strings,
+       .get_ethtool_stats      = sparx5_get_sset_data,
+       .get_link_ksettings     = sparx5_get_link_settings,
+       .set_link_ksettings     = sparx5_set_link_settings,
+       .get_link               = ethtool_op_get_link,
+       .get_eth_phy_stats      = sparx5_get_eth_phy_stats,
+       .get_eth_mac_stats      = sparx5_get_eth_mac_stats,
+       .get_eth_ctrl_stats     = sparx5_get_eth_mac_ctrl_stats,
+       .get_rmon_stats         = sparx5_get_eth_rmon_stats,
+};
+
+int sparx_stats_init(struct sparx5 *sparx5)
+{
+       char queue_name[32];
+       int portno;
+
+       sparx5->stats_layout = sparx5_stats_layout;
+       sparx5->num_stats = spx5_stats_count;
+       sparx5->num_ethtool_stats = ARRAY_SIZE(sparx5_stats_layout);
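+       /* One u64 slot per counter per port; the counters of port N
+        * start at stats[N * num_stats].
+        */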
+       sparx5->stats = devm_kcalloc(sparx5->dev,
+                                    SPX5_PORTS_ALL * sparx5->num_stats,
+                                    sizeof(u64), GFP_KERNEL);
+       if (!sparx5->stats)
+               return -ENOMEM;
+
+       mutex_init(&sparx5->queue_stats_lock);
+       sparx5_config_stats(sparx5);
+       for (portno = 0; portno < SPX5_PORTS; portno++)
+               if (sparx5->ports[portno])
+                       sparx5_config_port_stats(sparx5, portno);
+
+       snprintf(queue_name, sizeof(queue_name), "%s-stats",
+                dev_name(sparx5->dev));
+       sparx5->stats_queue = create_singlethread_workqueue(queue_name);
+       if (!sparx5->stats_queue)
+               return -ENOMEM;
+       INIT_DELAYED_WORK(&sparx5->stats_work, sparx5_check_stats_work);
+       queue_delayed_work(sparx5->stats_queue, &sparx5->stats_work,
+                          SPX5_STATS_CHECK_DELAY);
+
+       return 0;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
new file mode 100644 (file)
index 0000000..0443f66
--- /dev/null
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/switchdev.h>
+#include <linux/if_bridge.h>
+#include <linux/iopoll.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+/* Commands for Mac Table Command register */
+#define MAC_CMD_LEARN         0 /* Insert (Learn) 1 entry */
+#define MAC_CMD_UNLEARN       1 /* Unlearn (Forget) 1 entry */
+#define MAC_CMD_LOOKUP        2 /* Look up 1 entry */
+#define MAC_CMD_READ          3 /* Read entry at Mac Table Index */
+#define MAC_CMD_WRITE         4 /* Write entry at Mac Table Index */
+#define MAC_CMD_SCAN          5 /* Scan (Age or find next) */
+#define MAC_CMD_FIND_SMALLEST 6 /* Get next entry */
+#define MAC_CMD_CLEAR_ALL     7 /* Delete all entries in table */
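+
+/* A command is started by writing it to CPU_ACCESS_CMD together with
+ * the one-shot MAC_TABLE_ACCESS_SHOT bit; the shot bit clears again
+ * once the command has completed.
+ */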
+
+/* Commands for MAC_ENTRY_ADDR_TYPE */
+#define  MAC_ENTRY_ADDR_TYPE_UPSID_PN         0
+#define  MAC_ENTRY_ADDR_TYPE_UPSID_CPU_OR_INT 1
+#define  MAC_ENTRY_ADDR_TYPE_GLAG             2
+#define  MAC_ENTRY_ADDR_TYPE_MC_IDX           3
+
+#define TABLE_UPDATE_SLEEP_US 10
+#define TABLE_UPDATE_TIMEOUT_US 100000
+
+struct sparx5_mact_entry {
+       struct list_head list;
+       unsigned char mac[ETH_ALEN];
+       u32 flags;
+#define MAC_ENT_ALIVE  BIT(0)
+#define MAC_ENT_MOVED  BIT(1)
+#define MAC_ENT_LOCK   BIT(2)
+       u16 vid;
+       u16 port;
+};
+
+static int sparx5_mact_get_status(struct sparx5 *sparx5)
+{
+       return spx5_rd(sparx5, LRN_COMMON_ACCESS_CTRL);
+}
+
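+/* Poll until the MAC_TABLE_ACCESS_SHOT one-shot bit clears, sleeping
+ * TABLE_UPDATE_SLEEP_US between reads and giving up after
+ * TABLE_UPDATE_TIMEOUT_US.
+ */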
+static int sparx5_mact_wait_for_completion(struct sparx5 *sparx5)
+{
+       u32 val;
+
+       return readx_poll_timeout(sparx5_mact_get_status, sparx5, val,
+                                 LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(val) == 0,
+                                 TABLE_UPDATE_SLEEP_US,
+                                 TABLE_UPDATE_TIMEOUT_US);
+}
+
+static void sparx5_mact_select(struct sparx5 *sparx5,
+                              const unsigned char mac[ETH_ALEN],
+                              u16 vid)
+{
+       u32 macl = 0, mach = 0;
+
+       /* Set the MAC address to operate on and its associated VLAN in
+        * the format expected by the hardware.
+        */
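+       /* e.g. mac 00:11:22:33:44:55 with vid 1 gives
+        * mach = 0x00010011 and macl = 0x22334455.
+        */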
+       mach |= vid    << 16;
+       mach |= mac[0] << 8;
+       mach |= mac[1] << 0;
+       macl |= mac[2] << 24;
+       macl |= mac[3] << 16;
+       macl |= mac[4] << 8;
+       macl |= mac[5] << 0;
+
+       spx5_wr(mach, sparx5, LRN_MAC_ACCESS_CFG_0);
+       spx5_wr(macl, sparx5, LRN_MAC_ACCESS_CFG_1);
+}
+
+int sparx5_mact_learn(struct sparx5 *sparx5, int pgid,
+                     const unsigned char mac[ETH_ALEN], u16 vid)
+{
+       int addr, type, ret;
+
+       if (pgid < SPX5_PORTS) {
+               type = MAC_ENTRY_ADDR_TYPE_UPSID_PN;
+               addr = pgid % 32;
+               addr += (pgid / 32) << 5; /* Add upsid */
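+               /* e.g. pgid 37: port 5 on upsid 1 -> addr 0x25 */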
+       } else {
+               type = MAC_ENTRY_ADDR_TYPE_MC_IDX;
+               addr = pgid - SPX5_PORTS;
+       }
+
+       mutex_lock(&sparx5->lock);
+
+       sparx5_mact_select(sparx5, mac, vid);
+
+       /* MAC entry properties */
+       spx5_wr(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_SET(addr) |
+               LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_SET(type) |
+               LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_SET(1) |
+               LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_SET(1),
+               sparx5, LRN_MAC_ACCESS_CFG_2);
+       spx5_wr(0, sparx5, LRN_MAC_ACCESS_CFG_3);
+
+       /* Insert/learn new entry */
+       spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LEARN) |
+               LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+               sparx5, LRN_COMMON_ACCESS_CTRL);
+
+       ret = sparx5_mact_wait_for_completion(sparx5);
+
+       mutex_unlock(&sparx5->lock);
+
+       return ret;
+}
+
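+/* __dev_mc_sync() callbacks: multicast addresses on the netdev are
+ * mirrored into the MAC table towards the CPU port (PGID_CPU).
+ */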
+int sparx5_mc_unsync(struct net_device *dev, const unsigned char *addr)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+       struct sparx5 *sparx5 = port->sparx5;
+
+       return sparx5_mact_forget(sparx5, addr, port->pvid);
+}
+
+int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+       struct sparx5 *sparx5 = port->sparx5;
+
+       return sparx5_mact_learn(sparx5, PGID_CPU, addr, port->pvid);
+}
+
+static int sparx5_mact_get(struct sparx5 *sparx5,
+                          unsigned char mac[ETH_ALEN],
+                          u16 *vid, u32 *pcfg2)
+{
+       u32 mach, macl, cfg2;
+       int ret = -ENOENT;
+
+       cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2);
+       if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2)) {
+               mach = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_0);
+               macl = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_1);
+               mac[0] = ((mach >> 8)  & 0xff);
+               mac[1] = ((mach >> 0)  & 0xff);
+               mac[2] = ((macl >> 24) & 0xff);
+               mac[3] = ((macl >> 16) & 0xff);
+               mac[4] = ((macl >> 8)  & 0xff);
+               mac[5] = ((macl >> 0)  & 0xff);
+               *vid = mach >> 16;
+               *pcfg2 = cfg2;
+               ret = 0;
+       }
+
+       return ret;
+}
+
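+/* Find the next valid MAC table entry after the given mac/vid, using the
+ * FIND_SMALLEST scan command, and return the entry found (if any) through
+ * the mac, vid and pcfg2 arguments.
+ */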
+bool sparx5_mact_getnext(struct sparx5 *sparx5,
+                        unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2)
+{
+       u32 cfg2;
+       int ret;
+
+       mutex_lock(&sparx5->lock);
+
+       sparx5_mact_select(sparx5, mac, *vid);
+
+       spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_SET(1) |
+               LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1),
+               sparx5, LRN_SCAN_NEXT_CFG);
+       spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET
+               (MAC_CMD_FIND_SMALLEST) |
+               LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+               sparx5, LRN_COMMON_ACCESS_CTRL);
+
+       ret = sparx5_mact_wait_for_completion(sparx5);
+       if (ret == 0) {
+               ret = sparx5_mact_get(sparx5, mac, vid, &cfg2);
+               if (ret == 0)
+                       *pcfg2 = cfg2;
+       }
+
+       mutex_unlock(&sparx5->lock);
+
+       return ret == 0;
+}
+
+static int sparx5_mact_lookup(struct sparx5 *sparx5,
+                             const unsigned char mac[ETH_ALEN],
+                             u16 vid)
+{
+       int ret;
+
+       mutex_lock(&sparx5->lock);
+
+       sparx5_mact_select(sparx5, mac, vid);
+
+       /* Issue a lookup command */
+       spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LOOKUP) |
+               LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+               sparx5, LRN_COMMON_ACCESS_CTRL);
+
+       ret = sparx5_mact_wait_for_completion(sparx5);
+       if (ret)
+               goto out;
+
+       ret = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET
+               (spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2));
+
+out:
+       mutex_unlock(&sparx5->lock);
+
+       return ret;
+}
+
+int sparx5_mact_forget(struct sparx5 *sparx5,
+                      const unsigned char mac[ETH_ALEN], u16 vid)
+{
+       int ret;
+
+       mutex_lock(&sparx5->lock);
+
+       sparx5_mact_select(sparx5, mac, vid);
+
+       /* Issue an unlearn command */
+       spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_UNLEARN) |
+               LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+               sparx5, LRN_COMMON_ACCESS_CTRL);
+
+       ret = sparx5_mact_wait_for_completion(sparx5);
+
+       mutex_unlock(&sparx5->lock);
+
+       return ret;
+}
+
+static struct sparx5_mact_entry *alloc_mact_entry(struct sparx5 *sparx5,
+                                                 const unsigned char *mac,
+                                                 u16 vid, u16 port_index)
+{
+       struct sparx5_mact_entry *mact_entry;
+
+       mact_entry = devm_kzalloc(sparx5->dev,
+                                 sizeof(*mact_entry), GFP_ATOMIC);
+       if (!mact_entry)
+               return NULL;
+
+       memcpy(mact_entry->mac, mac, ETH_ALEN);
+       mact_entry->vid = vid;
+       mact_entry->port = port_index;
+       return mact_entry;
+}
+
+static struct sparx5_mact_entry *find_mact_entry(struct sparx5 *sparx5,
+                                                const unsigned char *mac,
+                                                u16 vid, u16 port_index)
+{
+       struct sparx5_mact_entry *mact_entry;
+       struct sparx5_mact_entry *res = NULL;
+
+       mutex_lock(&sparx5->mact_lock);
+       list_for_each_entry(mact_entry, &sparx5->mact_entries, list) {
+               if (mact_entry->vid == vid &&
+                   ether_addr_equal(mac, mact_entry->mac) &&
+                   mact_entry->port == port_index) {
+                       res = mact_entry;
+                       break;
+               }
+       }
+       mutex_unlock(&sparx5->mact_lock);
+
+       return res;
+}
+
+static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type,
+                                     const char *mac, u16 vid,
+                                     struct net_device *dev, bool offloaded)
+{
+       struct switchdev_notifier_fdb_info info;
+
+       info.addr = mac;
+       info.vid = vid;
+       info.offloaded = offloaded;
+       call_switchdev_notifiers(type, dev, &info.info, NULL);
+}
+
+int sparx5_add_mact_entry(struct sparx5 *sparx5,
+                         struct sparx5_port *port,
+                         const unsigned char *addr, u16 vid)
+{
+       struct sparx5_mact_entry *mact_entry;
+       int ret;
+
+       ret = sparx5_mact_lookup(sparx5, addr, vid);
+       if (ret)
+               return 0;
+
+       /* If the entry already exists, don't add it to SW again, just
+        * update HW. We must check the actual HW state because an entry
+        * may have been learned by HW before the mact thread started; in
+        * that case the frame reached the CPU, and the CPU added the
+        * entry, but without the extern_learn flag.
+        */
+       mact_entry = find_mact_entry(sparx5, addr, vid, port->portno);
+       if (mact_entry)
+               goto update_hw;
+
+       /* Add the entry to the SW MAC table so that no notification is
+        * sent when SW pulls the table again
+        */
+       mact_entry = alloc_mact_entry(sparx5, addr, vid, port->portno);
+       if (!mact_entry)
+               return -ENOMEM;
+
+       mutex_lock(&sparx5->mact_lock);
+       list_add_tail(&mact_entry->list, &sparx5->mact_entries);
+       mutex_unlock(&sparx5->mact_lock);
+
+update_hw:
+       ret = sparx5_mact_learn(sparx5, port->portno, addr, vid);
+
+       /* New entry? */
+       if (mact_entry->flags == 0) {
+               mact_entry->flags |= MAC_ENT_LOCK; /* Don't age this */
+               sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, addr, vid,
+                                         port->ndev, true);
+       }
+
+       return ret;
+}
+
+int sparx5_del_mact_entry(struct sparx5 *sparx5,
+                         const unsigned char *addr,
+                         u16 vid)
+{
+       struct sparx5_mact_entry *mact_entry, *tmp;
+
+       /* Delete the entry from the SW MAC table so that no notification
+        * is sent when SW pulls the table again
+        */
+       mutex_lock(&sparx5->mact_lock);
+       list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries,
+                                list) {
+               if ((vid == 0 || mact_entry->vid == vid) &&
+                   ether_addr_equal(addr, mact_entry->mac)) {
+                       list_del(&mact_entry->list);
+                       devm_kfree(sparx5->dev, mact_entry);
+
+                       sparx5_mact_forget(sparx5, addr, mact_entry->vid);
+               }
+       }
+       mutex_unlock(&sparx5->mact_lock);
+
+       return 0;
+}
+
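+/* Reconcile one HW-learned entry with the SW list: mark known entries as
+ * alive, detect station moves, and notify the bridge about new or moved
+ * entries.
+ */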
+static void sparx5_mact_handle_entry(struct sparx5 *sparx5,
+                                    unsigned char mac[ETH_ALEN],
+                                    u16 vid, u32 cfg2)
+{
+       struct sparx5_mact_entry *mact_entry;
+       bool found = false;
+       u16 port;
+
+       if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_GET(cfg2) !=
+           MAC_ENTRY_ADDR_TYPE_UPSID_PN)
+               return;
+
+       port = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(cfg2);
+       if (port >= SPX5_PORTS)
+               return;
+
+       if (!test_bit(port, sparx5->bridge_mask))
+               return;
+
+       mutex_lock(&sparx5->mact_lock);
+       list_for_each_entry(mact_entry, &sparx5->mact_entries, list) {
+               if (mact_entry->vid == vid &&
+                   ether_addr_equal(mac, mact_entry->mac)) {
+                       found = true;
+                       mact_entry->flags |= MAC_ENT_ALIVE;
+                       if (mact_entry->port != port) {
+                               dev_warn(sparx5->dev, "Entry move: %d -> %d\n",
+                                        mact_entry->port, port);
+                               mact_entry->port = port;
+                               mact_entry->flags |= MAC_ENT_MOVED;
+                       }
+                       /* Entry handled */
+                       break;
+               }
+       }
+       mutex_unlock(&sparx5->mact_lock);
+
+       if (found && !(mact_entry->flags & MAC_ENT_MOVED))
+               /* Present, not moved */
+               return;
+
+       if (!found) {
+               /* Entry not found - now add */
+               mact_entry = alloc_mact_entry(sparx5, mac, vid, port);
+               if (!mact_entry)
+                       return;
+
+               mact_entry->flags |= MAC_ENT_ALIVE;
+               mutex_lock(&sparx5->mact_lock);
+               list_add_tail(&mact_entry->list, &sparx5->mact_entries);
+               mutex_unlock(&sparx5->mact_lock);
+       }
+
+       /* New or moved entry - notify bridge */
+       sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+                                 mac, vid, sparx5->ports[port]->ndev,
+                                 true);
+}
+
+void sparx5_mact_pull_work(struct work_struct *work)
+{
+       struct delayed_work *del_work = to_delayed_work(work);
+       struct sparx5 *sparx5 = container_of(del_work, struct sparx5,
+                                            mact_work);
+       struct sparx5_mact_entry *mact_entry, *tmp;
+       unsigned char mac[ETH_ALEN];
+       u32 cfg2;
+       u16 vid;
+       int ret;
+
+       /* Reset MAC entry flags */
+       mutex_lock(&sparx5->mact_lock);
+       list_for_each_entry(mact_entry, &sparx5->mact_entries, list)
+               mact_entry->flags &= MAC_ENT_LOCK;
+       mutex_unlock(&sparx5->mact_lock);
+
+       /* Main MAC address processing loop */
+       vid = 0;
+       memset(mac, 0, sizeof(mac));
+       do {
+               mutex_lock(&sparx5->lock);
+               sparx5_mact_select(sparx5, mac, vid);
+               spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1),
+                       sparx5, LRN_SCAN_NEXT_CFG);
+               spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET
+                       (MAC_CMD_FIND_SMALLEST) |
+                       LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+                       sparx5, LRN_COMMON_ACCESS_CTRL);
+               ret = sparx5_mact_wait_for_completion(sparx5);
+               if (ret == 0)
+                       ret = sparx5_mact_get(sparx5, mac, &vid, &cfg2);
+               mutex_unlock(&sparx5->lock);
+               if (ret == 0)
+                       sparx5_mact_handle_entry(sparx5, mac, vid, cfg2);
+       } while (ret == 0);
+
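+       /* Age out SW entries that were not seen in HW during the scan above */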
+       mutex_lock(&sparx5->mact_lock);
+       list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries,
+                                list) {
+               /* If the entry is in HW or permanent, then skip */
+               if (mact_entry->flags & (MAC_ENT_ALIVE | MAC_ENT_LOCK))
+                       continue;
+
+               sparx5_fdb_call_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+                                         mact_entry->mac, mact_entry->vid,
+                                         sparx5->ports[mact_entry->port]->ndev,
+                                         true);
+
+               list_del(&mact_entry->list);
+               devm_kfree(sparx5->dev, mact_entry);
+       }
+       mutex_unlock(&sparx5->mact_lock);
+
+       queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work,
+                          SPX5_MACT_PULL_DELAY);
+}
+
+void sparx5_set_ageing(struct sparx5 *sparx5, int msecs)
+{
+       int value = max(1, msecs / 10); /* unit 10 ms */
+
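+       /* With 1-bit ageing an entry is removed on the second scan after it
+        * was last hit, so half the requested interval is programmed as the
+        * scan period.
+        */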
+       spx5_rmw(LRN_AUTOAGE_CFG_UNIT_SIZE_SET(2) | /* 10 ms */
+                LRN_AUTOAGE_CFG_PERIOD_VAL_SET(value / 2), /* one bit ageing */
+                LRN_AUTOAGE_CFG_UNIT_SIZE |
+                LRN_AUTOAGE_CFG_PERIOD_VAL,
+                sparx5,
+                LRN_AUTOAGE_CFG(0));
+}
+
+void sparx5_mact_init(struct sparx5 *sparx5)
+{
+       mutex_init(&sparx5->lock);
+
+       /* Flush MAC table */
+       spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_CLEAR_ALL) |
+               LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
+               sparx5, LRN_COMMON_ACCESS_CTRL);
+
+       if (sparx5_mact_wait_for_completion(sparx5) != 0)
+               dev_warn(sparx5->dev, "MAC flush error\n");
+
+       sparx5_set_ageing(sparx5, BR_DEFAULT_AGEING_TIME / HZ * 1000);
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
new file mode 100644 (file)
index 0000000..abaa086
--- /dev/null
@@ -0,0 +1,856 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ *
+ * The Sparx5 Chip Register Model can be browsed at this location:
+ * https://github.com/microchip-ung/sparx-5_reginfo
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <net/switchdev.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/printk.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+#include <linux/reset.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+
+#define QLIM_WM(fraction) \
+       ((SPX5_BUFFER_MEMORY / SPX5_BUFFER_CELL_SZ - 100) * (fraction) / 100)
+#define IO_RANGES 3
+
+struct initial_port_config {
+       u32 portno;
+       struct device_node *node;
+       struct sparx5_port_config conf;
+       struct phy *serdes;
+};
+
+struct sparx5_ram_config {
+       void __iomem *init_reg;
+       u32 init_val;
+};
+
+struct sparx5_main_io_resource {
+       enum sparx5_target id;
+       phys_addr_t offset;
+       int range;
+};
+
+static const struct sparx5_main_io_resource sparx5_main_iomap[] =  {
+       { TARGET_CPU,                         0, 0 }, /* 0x600000000 */
+       { TARGET_FDMA,                  0x80000, 0 }, /* 0x600080000 */
+       { TARGET_PCEP,                 0x400000, 0 }, /* 0x600400000 */
+       { TARGET_DEV2G5,             0x10004000, 1 }, /* 0x610004000 */
+       { TARGET_DEV5G,              0x10008000, 1 }, /* 0x610008000 */
+       { TARGET_PCS5G_BR,           0x1000c000, 1 }, /* 0x61000c000 */
+       { TARGET_DEV2G5 +  1,        0x10010000, 1 }, /* 0x610010000 */
+       { TARGET_DEV5G +  1,         0x10014000, 1 }, /* 0x610014000 */
+       { TARGET_PCS5G_BR +  1,      0x10018000, 1 }, /* 0x610018000 */
+       { TARGET_DEV2G5 +  2,        0x1001c000, 1 }, /* 0x61001c000 */
+       { TARGET_DEV5G +  2,         0x10020000, 1 }, /* 0x610020000 */
+       { TARGET_PCS5G_BR +  2,      0x10024000, 1 }, /* 0x610024000 */
+       { TARGET_DEV2G5 +  6,        0x10028000, 1 }, /* 0x610028000 */
+       { TARGET_DEV5G +  6,         0x1002c000, 1 }, /* 0x61002c000 */
+       { TARGET_PCS5G_BR +  6,      0x10030000, 1 }, /* 0x610030000 */
+       { TARGET_DEV2G5 +  7,        0x10034000, 1 }, /* 0x610034000 */
+       { TARGET_DEV5G +  7,         0x10038000, 1 }, /* 0x610038000 */
+       { TARGET_PCS5G_BR +  7,      0x1003c000, 1 }, /* 0x61003c000 */
+       { TARGET_DEV2G5 +  8,        0x10040000, 1 }, /* 0x610040000 */
+       { TARGET_DEV5G +  8,         0x10044000, 1 }, /* 0x610044000 */
+       { TARGET_PCS5G_BR +  8,      0x10048000, 1 }, /* 0x610048000 */
+       { TARGET_DEV2G5 +  9,        0x1004c000, 1 }, /* 0x61004c000 */
+       { TARGET_DEV5G +  9,         0x10050000, 1 }, /* 0x610050000 */
+       { TARGET_PCS5G_BR +  9,      0x10054000, 1 }, /* 0x610054000 */
+       { TARGET_DEV2G5 + 10,        0x10058000, 1 }, /* 0x610058000 */
+       { TARGET_DEV5G + 10,         0x1005c000, 1 }, /* 0x61005c000 */
+       { TARGET_PCS5G_BR + 10,      0x10060000, 1 }, /* 0x610060000 */
+       { TARGET_DEV2G5 + 11,        0x10064000, 1 }, /* 0x610064000 */
+       { TARGET_DEV5G + 11,         0x10068000, 1 }, /* 0x610068000 */
+       { TARGET_PCS5G_BR + 11,      0x1006c000, 1 }, /* 0x61006c000 */
+       { TARGET_DEV2G5 + 12,        0x10070000, 1 }, /* 0x610070000 */
+       { TARGET_DEV10G,             0x10074000, 1 }, /* 0x610074000 */
+       { TARGET_PCS10G_BR,          0x10078000, 1 }, /* 0x610078000 */
+       { TARGET_DEV2G5 + 14,        0x1007c000, 1 }, /* 0x61007c000 */
+       { TARGET_DEV10G +  2,        0x10080000, 1 }, /* 0x610080000 */
+       { TARGET_PCS10G_BR +  2,     0x10084000, 1 }, /* 0x610084000 */
+       { TARGET_DEV2G5 + 15,        0x10088000, 1 }, /* 0x610088000 */
+       { TARGET_DEV10G +  3,        0x1008c000, 1 }, /* 0x61008c000 */
+       { TARGET_PCS10G_BR +  3,     0x10090000, 1 }, /* 0x610090000 */
+       { TARGET_DEV2G5 + 16,        0x10094000, 1 }, /* 0x610094000 */
+       { TARGET_DEV2G5 + 17,        0x10098000, 1 }, /* 0x610098000 */
+       { TARGET_DEV2G5 + 18,        0x1009c000, 1 }, /* 0x61009c000 */
+       { TARGET_DEV2G5 + 19,        0x100a0000, 1 }, /* 0x6100a0000 */
+       { TARGET_DEV2G5 + 20,        0x100a4000, 1 }, /* 0x6100a4000 */
+       { TARGET_DEV2G5 + 21,        0x100a8000, 1 }, /* 0x6100a8000 */
+       { TARGET_DEV2G5 + 22,        0x100ac000, 1 }, /* 0x6100ac000 */
+       { TARGET_DEV2G5 + 23,        0x100b0000, 1 }, /* 0x6100b0000 */
+       { TARGET_DEV2G5 + 32,        0x100b4000, 1 }, /* 0x6100b4000 */
+       { TARGET_DEV2G5 + 33,        0x100b8000, 1 }, /* 0x6100b8000 */
+       { TARGET_DEV2G5 + 34,        0x100bc000, 1 }, /* 0x6100bc000 */
+       { TARGET_DEV2G5 + 35,        0x100c0000, 1 }, /* 0x6100c0000 */
+       { TARGET_DEV2G5 + 36,        0x100c4000, 1 }, /* 0x6100c4000 */
+       { TARGET_DEV2G5 + 37,        0x100c8000, 1 }, /* 0x6100c8000 */
+       { TARGET_DEV2G5 + 38,        0x100cc000, 1 }, /* 0x6100cc000 */
+       { TARGET_DEV2G5 + 39,        0x100d0000, 1 }, /* 0x6100d0000 */
+       { TARGET_DEV2G5 + 40,        0x100d4000, 1 }, /* 0x6100d4000 */
+       { TARGET_DEV2G5 + 41,        0x100d8000, 1 }, /* 0x6100d8000 */
+       { TARGET_DEV2G5 + 42,        0x100dc000, 1 }, /* 0x6100dc000 */
+       { TARGET_DEV2G5 + 43,        0x100e0000, 1 }, /* 0x6100e0000 */
+       { TARGET_DEV2G5 + 44,        0x100e4000, 1 }, /* 0x6100e4000 */
+       { TARGET_DEV2G5 + 45,        0x100e8000, 1 }, /* 0x6100e8000 */
+       { TARGET_DEV2G5 + 46,        0x100ec000, 1 }, /* 0x6100ec000 */
+       { TARGET_DEV2G5 + 47,        0x100f0000, 1 }, /* 0x6100f0000 */
+       { TARGET_DEV2G5 + 57,        0x100f4000, 1 }, /* 0x6100f4000 */
+       { TARGET_DEV25G +  1,        0x100f8000, 1 }, /* 0x6100f8000 */
+       { TARGET_PCS25G_BR +  1,     0x100fc000, 1 }, /* 0x6100fc000 */
+       { TARGET_DEV2G5 + 59,        0x10104000, 1 }, /* 0x610104000 */
+       { TARGET_DEV25G +  3,        0x10108000, 1 }, /* 0x610108000 */
+       { TARGET_PCS25G_BR +  3,     0x1010c000, 1 }, /* 0x61010c000 */
+       { TARGET_DEV2G5 + 60,        0x10114000, 1 }, /* 0x610114000 */
+       { TARGET_DEV25G +  4,        0x10118000, 1 }, /* 0x610118000 */
+       { TARGET_PCS25G_BR +  4,     0x1011c000, 1 }, /* 0x61011c000 */
+       { TARGET_DEV2G5 + 64,        0x10124000, 1 }, /* 0x610124000 */
+       { TARGET_DEV5G + 12,         0x10128000, 1 }, /* 0x610128000 */
+       { TARGET_PCS5G_BR + 12,      0x1012c000, 1 }, /* 0x61012c000 */
+       { TARGET_PORT_CONF,          0x10130000, 1 }, /* 0x610130000 */
+       { TARGET_DEV2G5 +  3,        0x10404000, 1 }, /* 0x610404000 */
+       { TARGET_DEV5G +  3,         0x10408000, 1 }, /* 0x610408000 */
+       { TARGET_PCS5G_BR +  3,      0x1040c000, 1 }, /* 0x61040c000 */
+       { TARGET_DEV2G5 +  4,        0x10410000, 1 }, /* 0x610410000 */
+       { TARGET_DEV5G +  4,         0x10414000, 1 }, /* 0x610414000 */
+       { TARGET_PCS5G_BR +  4,      0x10418000, 1 }, /* 0x610418000 */
+       { TARGET_DEV2G5 +  5,        0x1041c000, 1 }, /* 0x61041c000 */
+       { TARGET_DEV5G +  5,         0x10420000, 1 }, /* 0x610420000 */
+       { TARGET_PCS5G_BR +  5,      0x10424000, 1 }, /* 0x610424000 */
+       { TARGET_DEV2G5 + 13,        0x10428000, 1 }, /* 0x610428000 */
+       { TARGET_DEV10G +  1,        0x1042c000, 1 }, /* 0x61042c000 */
+       { TARGET_PCS10G_BR +  1,     0x10430000, 1 }, /* 0x610430000 */
+       { TARGET_DEV2G5 + 24,        0x10434000, 1 }, /* 0x610434000 */
+       { TARGET_DEV2G5 + 25,        0x10438000, 1 }, /* 0x610438000 */
+       { TARGET_DEV2G5 + 26,        0x1043c000, 1 }, /* 0x61043c000 */
+       { TARGET_DEV2G5 + 27,        0x10440000, 1 }, /* 0x610440000 */
+       { TARGET_DEV2G5 + 28,        0x10444000, 1 }, /* 0x610444000 */
+       { TARGET_DEV2G5 + 29,        0x10448000, 1 }, /* 0x610448000 */
+       { TARGET_DEV2G5 + 30,        0x1044c000, 1 }, /* 0x61044c000 */
+       { TARGET_DEV2G5 + 31,        0x10450000, 1 }, /* 0x610450000 */
+       { TARGET_DEV2G5 + 48,        0x10454000, 1 }, /* 0x610454000 */
+       { TARGET_DEV10G +  4,        0x10458000, 1 }, /* 0x610458000 */
+       { TARGET_PCS10G_BR +  4,     0x1045c000, 1 }, /* 0x61045c000 */
+       { TARGET_DEV2G5 + 49,        0x10460000, 1 }, /* 0x610460000 */
+       { TARGET_DEV10G +  5,        0x10464000, 1 }, /* 0x610464000 */
+       { TARGET_PCS10G_BR +  5,     0x10468000, 1 }, /* 0x610468000 */
+       { TARGET_DEV2G5 + 50,        0x1046c000, 1 }, /* 0x61046c000 */
+       { TARGET_DEV10G +  6,        0x10470000, 1 }, /* 0x610470000 */
+       { TARGET_PCS10G_BR +  6,     0x10474000, 1 }, /* 0x610474000 */
+       { TARGET_DEV2G5 + 51,        0x10478000, 1 }, /* 0x610478000 */
+       { TARGET_DEV10G +  7,        0x1047c000, 1 }, /* 0x61047c000 */
+       { TARGET_PCS10G_BR +  7,     0x10480000, 1 }, /* 0x610480000 */
+       { TARGET_DEV2G5 + 52,        0x10484000, 1 }, /* 0x610484000 */
+       { TARGET_DEV10G +  8,        0x10488000, 1 }, /* 0x610488000 */
+       { TARGET_PCS10G_BR +  8,     0x1048c000, 1 }, /* 0x61048c000 */
+       { TARGET_DEV2G5 + 53,        0x10490000, 1 }, /* 0x610490000 */
+       { TARGET_DEV10G +  9,        0x10494000, 1 }, /* 0x610494000 */
+       { TARGET_PCS10G_BR +  9,     0x10498000, 1 }, /* 0x610498000 */
+       { TARGET_DEV2G5 + 54,        0x1049c000, 1 }, /* 0x61049c000 */
+       { TARGET_DEV10G + 10,        0x104a0000, 1 }, /* 0x6104a0000 */
+       { TARGET_PCS10G_BR + 10,     0x104a4000, 1 }, /* 0x6104a4000 */
+       { TARGET_DEV2G5 + 55,        0x104a8000, 1 }, /* 0x6104a8000 */
+       { TARGET_DEV10G + 11,        0x104ac000, 1 }, /* 0x6104ac000 */
+       { TARGET_PCS10G_BR + 11,     0x104b0000, 1 }, /* 0x6104b0000 */
+       { TARGET_DEV2G5 + 56,        0x104b4000, 1 }, /* 0x6104b4000 */
+       { TARGET_DEV25G,             0x104b8000, 1 }, /* 0x6104b8000 */
+       { TARGET_PCS25G_BR,          0x104bc000, 1 }, /* 0x6104bc000 */
+       { TARGET_DEV2G5 + 58,        0x104c4000, 1 }, /* 0x6104c4000 */
+       { TARGET_DEV25G +  2,        0x104c8000, 1 }, /* 0x6104c8000 */
+       { TARGET_PCS25G_BR +  2,     0x104cc000, 1 }, /* 0x6104cc000 */
+       { TARGET_DEV2G5 + 61,        0x104d4000, 1 }, /* 0x6104d4000 */
+       { TARGET_DEV25G +  5,        0x104d8000, 1 }, /* 0x6104d8000 */
+       { TARGET_PCS25G_BR +  5,     0x104dc000, 1 }, /* 0x6104dc000 */
+       { TARGET_DEV2G5 + 62,        0x104e4000, 1 }, /* 0x6104e4000 */
+       { TARGET_DEV25G +  6,        0x104e8000, 1 }, /* 0x6104e8000 */
+       { TARGET_PCS25G_BR +  6,     0x104ec000, 1 }, /* 0x6104ec000 */
+       { TARGET_DEV2G5 + 63,        0x104f4000, 1 }, /* 0x6104f4000 */
+       { TARGET_DEV25G +  7,        0x104f8000, 1 }, /* 0x6104f8000 */
+       { TARGET_PCS25G_BR +  7,     0x104fc000, 1 }, /* 0x6104fc000 */
+       { TARGET_DSM,                0x10504000, 1 }, /* 0x610504000 */
+       { TARGET_ASM,                0x10600000, 1 }, /* 0x610600000 */
+       { TARGET_GCB,                0x11010000, 2 }, /* 0x611010000 */
+       { TARGET_QS,                 0x11030000, 2 }, /* 0x611030000 */
+       { TARGET_ANA_ACL,            0x11050000, 2 }, /* 0x611050000 */
+       { TARGET_LRN,                0x11060000, 2 }, /* 0x611060000 */
+       { TARGET_VCAP_SUPER,         0x11080000, 2 }, /* 0x611080000 */
+       { TARGET_QSYS,               0x110a0000, 2 }, /* 0x6110a0000 */
+       { TARGET_QFWD,               0x110b0000, 2 }, /* 0x6110b0000 */
+       { TARGET_XQS,                0x110c0000, 2 }, /* 0x6110c0000 */
+       { TARGET_CLKGEN,             0x11100000, 2 }, /* 0x611100000 */
+       { TARGET_ANA_AC_POL,         0x11200000, 2 }, /* 0x611200000 */
+       { TARGET_QRES,               0x11280000, 2 }, /* 0x611280000 */
+       { TARGET_EACL,               0x112c0000, 2 }, /* 0x6112c0000 */
+       { TARGET_ANA_CL,             0x11400000, 2 }, /* 0x611400000 */
+       { TARGET_ANA_L3,             0x11480000, 2 }, /* 0x611480000 */
+       { TARGET_HSCH,               0x11580000, 2 }, /* 0x611580000 */
+       { TARGET_REW,                0x11600000, 2 }, /* 0x611600000 */
+       { TARGET_ANA_L2,             0x11800000, 2 }, /* 0x611800000 */
+       { TARGET_ANA_AC,             0x11900000, 2 }, /* 0x611900000 */
+       { TARGET_VOP,                0x11a00000, 2 }, /* 0x611a00000 */
+};
+
+static int sparx5_create_targets(struct sparx5 *sparx5)
+{
+       struct resource *iores[IO_RANGES];
+       void __iomem *iomem[IO_RANGES];
+       void __iomem *begin[IO_RANGES];
+       int range_id[IO_RANGES];
+       int idx, jdx;
+
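+       /* Locate the first iomap entry of each I/O range; its offset is the
+        * base that all targets within that range are relative to.
+        */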
+       for (idx = 0, jdx = 0; jdx < ARRAY_SIZE(sparx5_main_iomap); jdx++) {
+               const struct sparx5_main_io_resource *iomap = &sparx5_main_iomap[jdx];
+
+               if (idx == iomap->range) {
+                       range_id[idx] = jdx;
+                       idx++;
+               }
+       }
+       for (idx = 0; idx < IO_RANGES; idx++) {
+               iores[idx] = platform_get_resource(sparx5->pdev, IORESOURCE_MEM,
+                                                  idx);
+               if (!iores[idx]) {
+                       dev_err(sparx5->dev, "Invalid resource\n");
+                       return -EINVAL;
+               }
+               iomem[idx] = devm_ioremap(sparx5->dev,
+                                         iores[idx]->start,
+                                         iores[idx]->end - iores[idx]->start
+                                         + 1);
+               if (!iomem[idx]) {
+                       dev_err(sparx5->dev, "Unable to get switch registers: %s\n",
+                               iores[idx]->name);
+                       return -ENOMEM;
+               }
+               begin[idx] = iomem[idx] - sparx5_main_iomap[range_id[idx]].offset;
+       }
+       for (jdx = 0; jdx < ARRAY_SIZE(sparx5_main_iomap); jdx++) {
+               const struct sparx5_main_io_resource *iomap = &sparx5_main_iomap[jdx];
+
+               sparx5->regs[iomap->id] = begin[iomap->range] + iomap->offset;
+       }
+       return 0;
+}
+
+static int sparx5_create_port(struct sparx5 *sparx5,
+                             struct initial_port_config *config)
+{
+       struct sparx5_port *spx5_port;
+       struct net_device *ndev;
+       struct phylink *phylink;
+       int err;
+
+       ndev = sparx5_create_netdev(sparx5, config->portno);
+       if (IS_ERR(ndev)) {
+               dev_err(sparx5->dev, "Could not create net device: %02u\n",
+                       config->portno);
+               return PTR_ERR(ndev);
+       }
+       spx5_port = netdev_priv(ndev);
+       spx5_port->of_node = config->node;
+       spx5_port->serdes = config->serdes;
+       spx5_port->pvid = NULL_VID;
+       spx5_port->signd_internal = true;
+       spx5_port->signd_active_high = true;
+       spx5_port->signd_enable = true;
+       spx5_port->max_vlan_tags = SPX5_PORT_MAX_TAGS_NONE;
+       spx5_port->vlan_type = SPX5_VLAN_PORT_TYPE_UNAWARE;
+       spx5_port->custom_etype = 0x8880; /* Vitesse */
+       spx5_port->phylink_pcs.poll = true;
+       spx5_port->phylink_pcs.ops = &sparx5_phylink_pcs_ops;
+       sparx5->ports[config->portno] = spx5_port;
+
+       err = sparx5_port_init(sparx5, spx5_port, &config->conf);
+       if (err) {
+               dev_err(sparx5->dev, "port init failed\n");
+               return err;
+       }
+       spx5_port->conf = config->conf;
+
+       /* Setup VLAN */
+       sparx5_vlan_port_setup(sparx5, spx5_port->portno);
+
+       /* Create a phylink for PHY management.  Also handles SFPs */
+       spx5_port->phylink_config.dev = &spx5_port->ndev->dev;
+       spx5_port->phylink_config.type = PHYLINK_NETDEV;
+       spx5_port->phylink_config.pcs_poll = true;
+
+       phylink = phylink_create(&spx5_port->phylink_config,
+                                of_fwnode_handle(config->node),
+                                config->conf.phy_mode,
+                                &sparx5_phylink_mac_ops);
+       if (IS_ERR(phylink))
+               return PTR_ERR(phylink);
+
+       spx5_port->phylink = phylink;
+       phylink_set_pcs(phylink, &spx5_port->phylink_pcs);
+
+       return 0;
+}
+
+static int sparx5_init_ram(struct sparx5 *s5)
+{
+       const struct sparx5_ram_config spx5_ram_cfg[] = {
+               {spx5_reg_get(s5, ANA_AC_STAT_RESET), ANA_AC_STAT_RESET_RESET},
+               {spx5_reg_get(s5, ASM_STAT_CFG), ASM_STAT_CFG_STAT_CNT_CLR_SHOT},
+               {spx5_reg_get(s5, QSYS_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+               {spx5_reg_get(s5, REW_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+               {spx5_reg_get(s5, VOP_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+               {spx5_reg_get(s5, ANA_AC_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+               {spx5_reg_get(s5, ASM_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+               {spx5_reg_get(s5, EACL_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+               {spx5_reg_get(s5, VCAP_SUPER_RAM_INIT), QSYS_RAM_INIT_RAM_INIT},
+               {spx5_reg_get(s5, DSM_RAM_INIT), QSYS_RAM_INIT_RAM_INIT}
+       };
+       const struct sparx5_ram_config *cfg;
+       u32 value, pending, jdx, idx;
+
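+       /* The first pass starts initialization of every RAM block; the
+        * following passes poll until all blocks have cleared their init
+        * bits.
+        */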
+       for (jdx = 0; jdx < 10; jdx++) {
+               pending = ARRAY_SIZE(spx5_ram_cfg);
+               for (idx = 0; idx < ARRAY_SIZE(spx5_ram_cfg); idx++) {
+                       cfg = &spx5_ram_cfg[idx];
+                       if (jdx == 0) {
+                               writel(cfg->init_val, cfg->init_reg);
+                       } else {
+                               value = readl(cfg->init_reg);
+                               if ((value & cfg->init_val) != cfg->init_val)
+                                       pending--;
+                       }
+               }
+               if (!pending)
+                       break;
+               usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+       }
+
+       if (pending > 0) {
+               /* Still initializing even though initialization should
+                * complete in less than 1 ms
+                */
+               dev_err(s5->dev, "Memory initialization error\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int sparx5_init_switchcore(struct sparx5 *sparx5)
+{
+       u32 value;
+       int err = 0;
+
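+       /* Toggle EACL_FORCE_INIT to (re)initialize the egress ACL block */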
+       spx5_rmw(EACL_POL_EACL_CFG_EACL_FORCE_INIT_SET(1),
+                EACL_POL_EACL_CFG_EACL_FORCE_INIT,
+                sparx5,
+                EACL_POL_EACL_CFG);
+
+       spx5_rmw(EACL_POL_EACL_CFG_EACL_FORCE_INIT_SET(0),
+                EACL_POL_EACL_CFG_EACL_FORCE_INIT,
+                sparx5,
+                EACL_POL_EACL_CFG);
+
+       /* Initialize memories, if not done already */
+       value = spx5_rd(sparx5, HSCH_RESET_CFG);
+       if (!(value & HSCH_RESET_CFG_CORE_ENA)) {
+               err = sparx5_init_ram(sparx5);
+               if (err)
+                       return err;
+       }
+
+       /* Reset counters */
+       spx5_wr(ANA_AC_STAT_RESET_RESET_SET(1), sparx5, ANA_AC_STAT_RESET);
+       spx5_wr(ASM_STAT_CFG_STAT_CNT_CLR_SHOT_SET(1), sparx5, ASM_STAT_CFG);
+
+       /* Enable switch-core and queue system */
+       spx5_wr(HSCH_RESET_CFG_CORE_ENA_SET(1), sparx5, HSCH_RESET_CFG);
+
+       return 0;
+}
+
+static int sparx5_init_coreclock(struct sparx5 *sparx5)
+{
+       enum sparx5_core_clockfreq freq = sparx5->coreclock;
+       u32 clk_div, clk_period, pol_upd_int, idx;
+
+       /* Verify that the core clock frequency is supported on the target.
+        * With 'SPX5_CORE_CLOCK_DEFAULT' the highest supported frequency
+        * is used.
+        */
+       switch (sparx5->target_ct) {
+       case SPX5_TARGET_CT_7546:
+               if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT)
+                       freq = SPX5_CORE_CLOCK_250MHZ;
+               else if (sparx5->coreclock != SPX5_CORE_CLOCK_250MHZ)
+                       freq = 0; /* Not supported */
+               break;
+       case SPX5_TARGET_CT_7549:
+       case SPX5_TARGET_CT_7552:
+       case SPX5_TARGET_CT_7556:
+               if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT)
+                       freq = SPX5_CORE_CLOCK_500MHZ;
+               else if (sparx5->coreclock != SPX5_CORE_CLOCK_500MHZ)
+                       freq = 0; /* Not supported */
+               break;
+       case SPX5_TARGET_CT_7558:
+       case SPX5_TARGET_CT_7558TSN:
+               if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT)
+                       freq = SPX5_CORE_CLOCK_625MHZ;
+               else if (sparx5->coreclock != SPX5_CORE_CLOCK_625MHZ)
+                       freq = 0; /* Not supported */
+               break;
+       case SPX5_TARGET_CT_7546TSN:
+               if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT)
+                       freq = SPX5_CORE_CLOCK_625MHZ;
+               break;
+       case SPX5_TARGET_CT_7549TSN:
+       case SPX5_TARGET_CT_7552TSN:
+       case SPX5_TARGET_CT_7556TSN:
+               if (sparx5->coreclock == SPX5_CORE_CLOCK_DEFAULT)
+                       freq = SPX5_CORE_CLOCK_625MHZ;
+               else if (sparx5->coreclock == SPX5_CORE_CLOCK_250MHZ)
+                       freq = 0; /* Not supported */
+               break;
+       default:
+               dev_err(sparx5->dev, "Target (%#04x) not supported\n",
+                       sparx5->target_ct);
+               return -ENODEV;
+       }
+
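+       /* Select the LCPLL divider and the policer update interval; the
+        * latter is given in core clocks and corresponds to ~1.25 us at
+        * each supported frequency.
+        */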
+       switch (freq) {
+       case SPX5_CORE_CLOCK_250MHZ:
+               clk_div = 10;
+               pol_upd_int = 312;
+               break;
+       case SPX5_CORE_CLOCK_500MHZ:
+               clk_div = 5;
+               pol_upd_int = 624;
+               break;
+       case SPX5_CORE_CLOCK_625MHZ:
+               clk_div = 4;
+               pol_upd_int = 780;
+               break;
+       default:
+               dev_err(sparx5->dev, "%d coreclock not supported on (%#04x)\n",
+                       sparx5->coreclock, sparx5->target_ct);
+               return -EINVAL;
+       }
+
+       /* Update state with chosen frequency */
+       sparx5->coreclock = freq;
+
+       /* Configure the LCPLL */
+       spx5_rmw(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(clk_div) |
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_SET(0) |
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_SET(0) |
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_SET(0) |
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_SET(0) |
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_SET(1),
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV |
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV |
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR |
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL |
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA |
+                CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA,
+                sparx5,
+                CLKGEN_LCPLL1_CORE_CLK_CFG);
+
+       clk_period = sparx5_clk_period(freq);
+
+       spx5_rmw(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_SET(clk_period / 100),
+                HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS,
+                sparx5,
+                HSCH_SYS_CLK_PER);
+
+       spx5_rmw(ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_SET(clk_period / 100),
+                ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS,
+                sparx5,
+                ANA_AC_POL_BDLB_DLB_CTRL);
+
+       spx5_rmw(ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_SET(clk_period / 100),
+                ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS,
+                sparx5,
+                ANA_AC_POL_SLB_DLB_CTRL);
+
+       spx5_rmw(LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS_SET(clk_period / 100),
+                LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS,
+                sparx5,
+                LRN_AUTOAGE_CFG_1);
+
+       for (idx = 0; idx < 3; idx++)
+               spx5_rmw(GCB_SIO_CLOCK_SYS_CLK_PERIOD_SET(clk_period / 100),
+                        GCB_SIO_CLOCK_SYS_CLK_PERIOD,
+                        sparx5,
+                        GCB_SIO_CLOCK(idx));
+
+       spx5_rmw(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_SET
+                ((256 * 1000) / clk_period),
+                HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY,
+                sparx5,
+                HSCH_TAS_STATEMACHINE_CFG);
+
+       spx5_rmw(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_SET(pol_upd_int),
+                ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT,
+                sparx5,
+                ANA_AC_POL_POL_UPD_INT_CFG);
+
+       return 0;
+}
+
+static int sparx5_qlim_set(struct sparx5 *sparx5)
+{
+       u32 res, dp, prio;
+
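+       /* Set the shared per-priority and per-DP watermarks in both
+        * resource pools to maximum; overall usage is then bounded by the
+        * memory watermarks set below.
+        */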
+       for (res = 0; res < 2; res++) {
+               for (prio = 0; prio < 8; prio++)
+                       spx5_wr(0xFFF, sparx5,
+                               QRES_RES_CFG(prio + 630 + res * 1024));
+
+               for (dp = 0; dp < 4; dp++)
+                       spx5_wr(0xFFF, sparx5,
+                               QRES_RES_CFG(dp + 638 + res * 1024));
+       }
+
+       /* Set 80,90,95,100% of memory size for top watermarks */
+       spx5_wr(QLIM_WM(80), sparx5, XQS_QLIMIT_SHR_QLIM_CFG(0));
+       spx5_wr(QLIM_WM(90), sparx5, XQS_QLIMIT_SHR_CTOP_CFG(0));
+       spx5_wr(QLIM_WM(95), sparx5, XQS_QLIMIT_SHR_ATOP_CFG(0));
+       spx5_wr(QLIM_WM(100), sparx5, XQS_QLIMIT_SHR_TOP_CFG(0));
+
+       return 0;
+}
+
+/* Some boards need to map the SGPIO for signal detect explicitly to the
+ * port module
+ */
+static void sparx5_board_init(struct sparx5 *sparx5)
+{
+       int idx;
+
+       if (!sparx5->sd_sgpio_remapping)
+               return;
+
+       /* Enable SGPIO Signal Detect remapping */
+       spx5_rmw(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL,
+                GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL,
+                sparx5,
+                GCB_HW_SGPIO_SD_CFG);
+
+       /* Refer to LOS SGPIO */
+       for (idx = 0; idx < SPX5_PORTS; idx++)
+               if (sparx5->ports[idx])
+                       if (sparx5->ports[idx]->conf.sd_sgpio != ~0)
+                               spx5_wr(sparx5->ports[idx]->conf.sd_sgpio,
+                                       sparx5,
+                                       GCB_HW_SGPIO_TO_SD_MAP_CFG(idx));
+}
+
+static int sparx5_start(struct sparx5 *sparx5)
+{
+       u8 broadcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+       char queue_name[32];
+       u32 idx;
+       int err;
+
+       /* Set up own UPSIDs: the 65 front ports span three 32-port units */
+       for (idx = 0; idx < 3; idx++) {
+               spx5_wr(idx, sparx5, ANA_AC_OWN_UPSID(idx));
+               spx5_wr(idx, sparx5, ANA_CL_OWN_UPSID(idx));
+               spx5_wr(idx, sparx5, ANA_L2_OWN_UPSID(idx));
+               spx5_wr(idx, sparx5, REW_OWN_UPSID(idx));
+       }
+
+       /* Enable CPU ports */
+       for (idx = SPX5_PORTS; idx < SPX5_PORTS_ALL; idx++)
+               spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1),
+                        QFWD_SWITCH_PORT_MODE_PORT_ENA,
+                        sparx5,
+                        QFWD_SWITCH_PORT_MODE(idx));
+
+       /* Init masks */
+       sparx5_update_fwd(sparx5);
+
+       /* CPU copy CPU pgids */
+       spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
+               sparx5, ANA_AC_PGID_MISC_CFG(PGID_CPU));
+       spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
+               sparx5, ANA_AC_PGID_MISC_CFG(PGID_BCAST));
+
+       /* Recalc injected frame FCS */
+       for (idx = SPX5_PORT_CPU_0; idx <= SPX5_PORT_CPU_1; idx++)
+               spx5_rmw(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_SET(1),
+                        ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA,
+                        sparx5, ANA_CL_FILTER_CTRL(idx));
+
+       /* Init MAC table, ageing */
+       sparx5_mact_init(sparx5);
+
+       /* Setup VLANs */
+       sparx5_vlan_init(sparx5);
+
+       /* Add host mode BC address (points only to CPU) */
+       sparx5_mact_learn(sparx5, PGID_CPU, broadcast, NULL_VID);
+
+       /* Enable queue limitation watermarks */
+       sparx5_qlim_set(sparx5);
+
+       err = sparx5_config_auto_calendar(sparx5);
+       if (err)
+               return err;
+
+       err = sparx5_config_dsm_calendar(sparx5);
+       if (err)
+               return err;
+
+       /* Init stats */
+       err = sparx_stats_init(sparx5);
+       if (err)
+               return err;
+
+       /* Init mact_sw struct */
+       mutex_init(&sparx5->mact_lock);
+       INIT_LIST_HEAD(&sparx5->mact_entries);
+       snprintf(queue_name, sizeof(queue_name), "%s-mact",
+                dev_name(sparx5->dev));
+       sparx5->mact_queue = create_singlethread_workqueue(queue_name);
+       if (!sparx5->mact_queue)
+               return -ENOMEM;
+       INIT_DELAYED_WORK(&sparx5->mact_work, sparx5_mact_pull_work);
+       queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work,
+                          SPX5_MACT_PULL_DELAY);
+
+       err = sparx5_register_netdevs(sparx5);
+       if (err)
+               return err;
+
+       sparx5_board_init(sparx5);
+       err = sparx5_register_notifier_blocks(sparx5);
+       if (err)
+               return err;
+
+       /* Start register based INJ/XTR; err is primed so that the
+        * register-based fallback below is always taken
+        */
+       err = -ENXIO;
+       if (err && sparx5->xtr_irq >= 0) {
+               err = devm_request_irq(sparx5->dev, sparx5->xtr_irq,
+                                      sparx5_xtr_handler, IRQF_SHARED,
+                                      "sparx5-xtr", sparx5);
+               if (!err)
+                       err = sparx5_manual_injection_mode(sparx5);
+               if (err)
+                       sparx5->xtr_irq = -ENXIO;
+       } else {
+               sparx5->xtr_irq = -ENXIO;
+       }
+       return err;
+}
+
+static void sparx5_cleanup_ports(struct sparx5 *sparx5)
+{
+       sparx5_unregister_netdevs(sparx5);
+       sparx5_destroy_netdevs(sparx5);
+}
+
+static int mchp_sparx5_probe(struct platform_device *pdev)
+{
+       struct initial_port_config *configs, *config;
+       struct device_node *np = pdev->dev.of_node;
+       struct device_node *ports, *portnp;
+       struct reset_control *reset;
+       struct sparx5 *sparx5;
+       int idx = 0, err = 0;
+
+       if (!np && !pdev->dev.platform_data)
+               return -ENODEV;
+
+       sparx5 = devm_kzalloc(&pdev->dev, sizeof(*sparx5), GFP_KERNEL);
+       if (!sparx5)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, sparx5);
+       sparx5->pdev = pdev;
+       sparx5->dev = &pdev->dev;
+
+       /* Do switch core reset if available */
+       reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
+       if (IS_ERR(reset))
+               return dev_err_probe(&pdev->dev, PTR_ERR(reset),
+                                    "Failed to get switch reset controller.\n");
+       reset_control_reset(reset);
+
+       /* Default values, some from DT */
+       sparx5->coreclock = SPX5_CORE_CLOCK_DEFAULT;
+
+       ports = of_get_child_by_name(np, "ethernet-ports");
+       if (!ports) {
+               dev_err(sparx5->dev, "no ethernet-ports child node found\n");
+               return -ENODEV;
+       }
+       sparx5->port_count = of_get_child_count(ports);
+
+       configs = kcalloc(sparx5->port_count,
+                         sizeof(struct initial_port_config), GFP_KERNEL);
+       if (!configs) {
+               err = -ENOMEM;
+               goto cleanup_pnode;
+       }
+
+       for_each_available_child_of_node(ports, portnp) {
+               struct sparx5_port_config *conf;
+               struct phy *serdes;
+               u32 portno;
+
+               err = of_property_read_u32(portnp, "reg", &portno);
+               if (err) {
+                       dev_err(sparx5->dev, "port reg property error\n");
+                       continue;
+               }
+               config = &configs[idx];
+               conf = &config->conf;
+               conf->speed = SPEED_UNKNOWN;
+               conf->bandwidth = SPEED_UNKNOWN;
+               err = of_get_phy_mode(portnp, &conf->phy_mode);
+               if (err) {
+                       dev_err(sparx5->dev, "port %u: missing phy-mode\n",
+                               portno);
+                       continue;
+               }
+               err = of_property_read_u32(portnp, "microchip,bandwidth",
+                                          &conf->bandwidth);
+               if (err) {
+                       dev_err(sparx5->dev, "port %u: missing bandwidth\n",
+                               portno);
+                       continue;
+               }
+               err = of_property_read_u32(portnp, "microchip,sd-sgpio", &conf->sd_sgpio);
+               if (err)
+                       conf->sd_sgpio = ~0;
+               else
+                       sparx5->sd_sgpio_remapping = true;
+               serdes = devm_of_phy_get(sparx5->dev, portnp, NULL);
+               if (IS_ERR(serdes)) {
+                       err = dev_err_probe(sparx5->dev, PTR_ERR(serdes),
+                                           "port %u: missing serdes\n",
+                                           portno);
+                       of_node_put(portnp);
+                       goto cleanup_config;
+               }
+               config->portno = portno;
+               config->node = portnp;
+               config->serdes = serdes;
+
+               conf->media = PHY_MEDIA_DAC;
+               conf->serdes_reset = true;
+               conf->portmode = conf->phy_mode;
+               conf->power_down = true;
+               idx++;
+       }
+
+       err = sparx5_create_targets(sparx5);
+       if (err)
+               goto cleanup_config;
+
+       if (of_get_mac_address(np, sparx5->base_mac)) {
+               dev_info(sparx5->dev, "MAC addr was not set, use random MAC\n");
+               eth_random_addr(sparx5->base_mac);
+               sparx5->base_mac[5] = 0;
+       }
+
+       sparx5->xtr_irq = platform_get_irq_byname(sparx5->pdev, "xtr");
+
+       /* Read chip ID to check CPU interface */
+       sparx5->chip_id = spx5_rd(sparx5, GCB_CHIP_ID);
+
+       sparx5->target_ct = (enum spx5_target_chiptype)
+               GCB_CHIP_ID_PART_ID_GET(sparx5->chip_id);
+
+       /* Initialize Switchcore and internal RAMs */
+       err = sparx5_init_switchcore(sparx5);
+       if (err) {
+               dev_err(sparx5->dev, "Switchcore initialization error\n");
+               goto cleanup_config;
+       }
+
+       /* Initialize the LC-PLL (core clock) and set affected registers */
+       err = sparx5_init_coreclock(sparx5);
+       if (err) {
+               dev_err(sparx5->dev, "LC-PLL initialization error\n");
+               goto cleanup_config;
+       }
+
+       for (idx = 0; idx < sparx5->port_count; ++idx) {
+               config = &configs[idx];
+               if (!config->node)
+                       continue;
+
+               err = sparx5_create_port(sparx5, config);
+               if (err) {
+                       dev_err(sparx5->dev, "port create error\n");
+                       goto cleanup_ports;
+               }
+       }
+
+       err = sparx5_start(sparx5);
+       if (err) {
+               dev_err(sparx5->dev, "Start failed\n");
+               goto cleanup_ports;
+       }
+       goto cleanup_config;
+
+cleanup_ports:
+       sparx5_cleanup_ports(sparx5);
+cleanup_config:
+       kfree(configs);
+cleanup_pnode:
+       of_node_put(ports);
+       return err;
+}
+
+static int mchp_sparx5_remove(struct platform_device *pdev)
+{
+       struct sparx5 *sparx5 = platform_get_drvdata(pdev);
+
+       if (sparx5->xtr_irq >= 0) {
+               disable_irq(sparx5->xtr_irq);
+               sparx5->xtr_irq = -ENXIO;
+       }
+       sparx5_cleanup_ports(sparx5);
+       sparx5_unregister_notifier_blocks(sparx5);
+
+       return 0;
+}
+
+static const struct of_device_id mchp_sparx5_match[] = {
+       { .compatible = "microchip,sparx5-switch" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, mchp_sparx5_match);
+
+static struct platform_driver mchp_sparx5_driver = {
+       .probe = mchp_sparx5_probe,
+       .remove = mchp_sparx5_remove,
+       .driver = {
+               .name = "sparx5-switch",
+               .of_match_table = mchp_sparx5_match,
+       },
+};
+
+module_platform_driver(mchp_sparx5_driver);
+
+MODULE_DESCRIPTION("Microchip Sparx5 switch driver");
+MODULE_AUTHOR("Steen Hegelund <steen.hegelund@microchip.com>");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
new file mode 100644 (file)
index 0000000..4d5f44c
--- /dev/null
@@ -0,0 +1,375 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __SPARX5_MAIN_H__
+#define __SPARX5_MAIN_H__
+
+#include <linux/types.h>
+#include <linux/phy/phy.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/bitmap.h>
+#include <linux/phylink.h>
+#include <linux/hrtimer.h>
+
+/* Target chip type */
+enum spx5_target_chiptype {
+       SPX5_TARGET_CT_7546    = 0x7546,  /* SparX-5-64  Enterprise */
+       SPX5_TARGET_CT_7549    = 0x7549,  /* SparX-5-90  Enterprise */
+       SPX5_TARGET_CT_7552    = 0x7552,  /* SparX-5-128 Enterprise */
+       SPX5_TARGET_CT_7556    = 0x7556,  /* SparX-5-160 Enterprise */
+       SPX5_TARGET_CT_7558    = 0x7558,  /* SparX-5-200 Enterprise */
+       SPX5_TARGET_CT_7546TSN = 0x47546, /* SparX-5-64i Industrial */
+       SPX5_TARGET_CT_7549TSN = 0x47549, /* SparX-5-90i Industrial */
+       SPX5_TARGET_CT_7552TSN = 0x47552, /* SparX-5-128i Industrial */
+       SPX5_TARGET_CT_7556TSN = 0x47556, /* SparX-5-160i Industrial */
+       SPX5_TARGET_CT_7558TSN = 0x47558, /* SparX-5-200i Industrial */
+};
+
+enum sparx5_port_max_tags {
+       SPX5_PORT_MAX_TAGS_NONE,  /* No extra tags allowed */
+       SPX5_PORT_MAX_TAGS_ONE,   /* Single tag allowed */
+       SPX5_PORT_MAX_TAGS_TWO    /* Single and double tag allowed */
+};
+
+enum sparx5_vlan_port_type {
+       SPX5_VLAN_PORT_TYPE_UNAWARE, /* VLAN unaware port */
+       SPX5_VLAN_PORT_TYPE_C,       /* C-port */
+       SPX5_VLAN_PORT_TYPE_S,       /* S-port */
+       SPX5_VLAN_PORT_TYPE_S_CUSTOM /* S-port using custom type */
+};
+
+#define SPX5_PORTS             65
+#define SPX5_PORT_CPU          (SPX5_PORTS)  /* Next port is CPU port */
+#define SPX5_PORT_CPU_0        (SPX5_PORT_CPU + 0) /* CPU Port 65 */
+#define SPX5_PORT_CPU_1        (SPX5_PORT_CPU + 1) /* CPU Port 66 */
+#define SPX5_PORT_VD0          (SPX5_PORT_CPU + 2) /* VD0/Port 67 used for IPMC */
+#define SPX5_PORT_VD1          (SPX5_PORT_CPU + 3) /* VD1/Port 68 used for AFI/OAM */
+#define SPX5_PORT_VD2          (SPX5_PORT_CPU + 4) /* VD2/Port 69 used for IPinIP*/
+#define SPX5_PORTS_ALL         (SPX5_PORT_CPU + 5) /* Total number of ports */
+
+#define PGID_BASE              SPX5_PORTS /* Starts after port PGIDs */
+#define PGID_UC_FLOOD          (PGID_BASE + 0)
+#define PGID_MC_FLOOD          (PGID_BASE + 1)
+#define PGID_IPV4_MC_DATA      (PGID_BASE + 2)
+#define PGID_IPV4_MC_CTRL      (PGID_BASE + 3)
+#define PGID_IPV6_MC_DATA      (PGID_BASE + 4)
+#define PGID_IPV6_MC_CTRL      (PGID_BASE + 5)
+#define PGID_BCAST             (PGID_BASE + 6)
+#define PGID_CPU               (PGID_BASE + 7)
+
+#define IFH_LEN                9 /* 36 bytes */
+#define NULL_VID               0
+#define SPX5_MACT_PULL_DELAY   (2 * HZ)
+#define SPX5_STATS_CHECK_DELAY (1 * HZ)
+#define SPX5_PRIOS             8     /* Number of priority queues */
+#define SPX5_BUFFER_CELL_SZ    184   /* Cell size  */
+#define SPX5_BUFFER_MEMORY     4194280 /* 22795 words * 184 bytes */
+
+#define XTR_QUEUE     0
+#define INJ_QUEUE     0
+
+struct sparx5;
+
+struct sparx5_port_config {
+       phy_interface_t portmode;
+       u32 bandwidth;
+       int speed;
+       int duplex;
+       enum phy_media media;
+       bool inband;
+       bool power_down;
+       bool autoneg;
+       bool serdes_reset;
+       u32 pause;
+       u32 pause_adv;
+       phy_interface_t phy_mode;
+       u32 sd_sgpio;
+};
+
+struct sparx5_port {
+       struct net_device *ndev;
+       struct sparx5 *sparx5;
+       struct device_node *of_node;
+       struct phy *serdes;
+       struct sparx5_port_config conf;
+       struct phylink_config phylink_config;
+       struct phylink *phylink;
+       struct phylink_pcs phylink_pcs;
+       u16 portno;
+       /* Ingress default VLAN (pvid) */
+       u16 pvid;
+       /* Egress default VLAN (vid) */
+       u16 vid;
+       bool signd_internal;
+       bool signd_active_high;
+       bool signd_enable;
+       bool flow_control;
+       enum sparx5_port_max_tags max_vlan_tags;
+       enum sparx5_vlan_port_type vlan_type;
+       u32 custom_etype;
+       u32 ifh[IFH_LEN];
+       bool vlan_aware;
+       struct hrtimer inj_timer;
+};
+
+enum sparx5_core_clockfreq {
+       SPX5_CORE_CLOCK_DEFAULT,  /* Defaults to the highest supported frequency */
+       SPX5_CORE_CLOCK_250MHZ,   /* 250MHZ core clock frequency */
+       SPX5_CORE_CLOCK_500MHZ,   /* 500MHZ core clock frequency */
+       SPX5_CORE_CLOCK_625MHZ,   /* 625MHZ core clock frequency */
+};
+
+struct sparx5 {
+       struct platform_device *pdev;
+       struct device *dev;
+       u32 chip_id;
+       enum spx5_target_chiptype target_ct;
+       void __iomem *regs[NUM_TARGETS];
+       int port_count;
+       struct mutex lock; /* MAC reg lock */
+       /* port structures are in net device */
+       struct sparx5_port *ports[SPX5_PORTS];
+       enum sparx5_core_clockfreq coreclock;
+       /* Statistics */
+       u32 num_stats;
+       u32 num_ethtool_stats;
+       const char * const *stats_layout;
+       u64 *stats;
+       /* Workqueue for reading stats */
+       struct mutex queue_stats_lock;
+       struct delayed_work stats_work;
+       struct workqueue_struct *stats_queue;
+       /* Notifiers */
+       struct notifier_block netdevice_nb;
+       struct notifier_block switchdev_nb;
+       struct notifier_block switchdev_blocking_nb;
+       /* Switch state */
+       u8 base_mac[ETH_ALEN];
+       /* Associated bridge device (when bridged) */
+       struct net_device *hw_bridge_dev;
+       /* Bridged interfaces */
+       DECLARE_BITMAP(bridge_mask, SPX5_PORTS);
+       DECLARE_BITMAP(bridge_fwd_mask, SPX5_PORTS);
+       DECLARE_BITMAP(bridge_lrn_mask, SPX5_PORTS);
+       DECLARE_BITMAP(vlan_mask[VLAN_N_VID], SPX5_PORTS);
+       /* SW MAC table */
+       struct list_head mact_entries;
+       /* mac table list (mact_entries) mutex */
+       struct mutex mact_lock;
+       struct delayed_work mact_work;
+       struct workqueue_struct *mact_queue;
+       /* Board specifics */
+       bool sd_sgpio_remapping;
+       /* Register based inj/xtr */
+       int xtr_irq;
+};
+
+/* sparx5_switchdev.c */
+int sparx5_register_notifier_blocks(struct sparx5 *sparx5);
+void sparx5_unregister_notifier_blocks(struct sparx5 *sparx5);
+
+/* sparx5_packet.c */
+irqreturn_t sparx5_xtr_handler(int irq, void *_priv);
+int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev);
+int sparx5_manual_injection_mode(struct sparx5 *sparx5);
+void sparx5_port_inj_timer_setup(struct sparx5_port *port);
+
+/* sparx5_mactable.c */
+void sparx5_mact_pull_work(struct work_struct *work);
+int sparx5_mact_learn(struct sparx5 *sparx5, int port,
+                     const unsigned char mac[ETH_ALEN], u16 vid);
+bool sparx5_mact_getnext(struct sparx5 *sparx5,
+                        unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2);
+int sparx5_mact_forget(struct sparx5 *sparx5,
+                      const unsigned char mac[ETH_ALEN], u16 vid);
+int sparx5_add_mact_entry(struct sparx5 *sparx5,
+                         struct sparx5_port *port,
+                         const unsigned char *addr, u16 vid);
+int sparx5_del_mact_entry(struct sparx5 *sparx5,
+                         const unsigned char *addr,
+                         u16 vid);
+int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr);
+int sparx5_mc_unsync(struct net_device *dev, const unsigned char *addr);
+void sparx5_set_ageing(struct sparx5 *sparx5, int msecs);
+void sparx5_mact_init(struct sparx5 *sparx5);
+
+/* sparx5_vlan.c */
+void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable);
+void sparx5_update_fwd(struct sparx5 *sparx5);
+void sparx5_vlan_init(struct sparx5 *sparx5);
+void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno);
+int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
+                       bool untagged);
+int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid);
+void sparx5_vlan_port_apply(struct sparx5 *sparx5, struct sparx5_port *port);
+
+/* sparx5_calendar.c */
+int sparx5_config_auto_calendar(struct sparx5 *sparx5);
+int sparx5_config_dsm_calendar(struct sparx5 *sparx5);
+
+/* sparx5_ethtool.c */
+void sparx5_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats);
+int sparx_stats_init(struct sparx5 *sparx5);
+
+/* sparx5_netdev.c */
+bool sparx5_netdevice_check(const struct net_device *dev);
+struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno);
+int sparx5_register_netdevs(struct sparx5 *sparx5);
+void sparx5_destroy_netdevs(struct sparx5 *sparx5);
+void sparx5_unregister_netdevs(struct sparx5 *sparx5);
+
+/* Clock period in picoseconds */
+static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock)
+{
+       switch (cclock) {
+       case SPX5_CORE_CLOCK_250MHZ:
+               return 4000;
+       case SPX5_CORE_CLOCK_500MHZ:
+               return 2000;
+       case SPX5_CORE_CLOCK_625MHZ:
+       default:
+               return 1600;
+       }
+}
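
The returned values follow from period[ps] = 10^12 / f[Hz]: 10^12 / 250e6 = 4000 ps, 2000 ps at 500 MHz, and 1600 ps at 625 MHz. As a sketch of how a caller can build on the helper (a hypothetical function, for illustration only):

    /* Hypothetical helper: ticks elapsed per microsecond for a given
     * core clock; 1 us = 1,000,000 ps, so 250 MHz yields 250 ticks.
     */
    static u32 sparx5_ticks_per_usec(enum sparx5_core_clockfreq cclock)
    {
            return 1000000 / sparx5_clk_period(cclock);
    }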
+
+static inline bool sparx5_is_baser(phy_interface_t interface)
+{
+       return interface == PHY_INTERFACE_MODE_5GBASER ||
+                  interface == PHY_INTERFACE_MODE_10GBASER ||
+                  interface == PHY_INTERFACE_MODE_25GBASER;
+}
+
+extern const struct phylink_mac_ops sparx5_phylink_mac_ops;
+extern const struct phylink_pcs_ops sparx5_phylink_pcs_ops;
+extern const struct ethtool_ops sparx5_ethtool_ops;
+
+/* Calculate raw offset */
+static inline __pure int spx5_offset(int id, int tinst, int tcnt,
+                                    int gbase, int ginst,
+                                    int gcnt, int gwidth,
+                                    int raddr, int rinst,
+                                    int rcnt, int rwidth)
+{
+       WARN_ON((tinst) >= tcnt);
+       WARN_ON((ginst) >= gcnt);
+       WARN_ON((rinst) >= rcnt);
+       return gbase + ((ginst) * gwidth) +
+               raddr + ((rinst) * rwidth);
+}
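
Note that spx5_offset() checks tinst against tcnt only via WARN_ON; the target instance does not enter the returned value. That is deliberate: spx5_addr() below resolves the instance through the base[id + tinst] lookup, so spx5_offset() reports only the byte offset within one target.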
+
+/* Read, write, and modify register contents.
+ * The register definition macros start at the id
+ */
+static inline void __iomem *spx5_addr(void __iomem *base[],
+                                     int id, int tinst, int tcnt,
+                                     int gbase, int ginst,
+                                     int gcnt, int gwidth,
+                                     int raddr, int rinst,
+                                     int rcnt, int rwidth)
+{
+       WARN_ON((tinst) >= tcnt);
+       WARN_ON((ginst) >= gcnt);
+       WARN_ON((rinst) >= rcnt);
+       return base[id + (tinst)] +
+               gbase + ((ginst) * gwidth) +
+               raddr + ((rinst) * rwidth);
+}
+
+static inline void __iomem *spx5_inst_addr(void __iomem *base,
+                                          int gbase, int ginst,
+                                          int gcnt, int gwidth,
+                                          int raddr, int rinst,
+                                          int rcnt, int rwidth)
+{
+       WARN_ON((ginst) >= gcnt);
+       WARN_ON((rinst) >= rcnt);
+       return base +
+               gbase + ((ginst) * gwidth) +
+               raddr + ((rinst) * rwidth);
+}
+
+static inline u32 spx5_rd(struct sparx5 *sparx5, int id, int tinst, int tcnt,
+                         int gbase, int ginst, int gcnt, int gwidth,
+                         int raddr, int rinst, int rcnt, int rwidth)
+{
+       return readl(spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
+                              gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
+
+static inline u32 spx5_inst_rd(void __iomem *iomem, int id, int tinst, int tcnt,
+                              int gbase, int ginst, int gcnt, int gwidth,
+                              int raddr, int rinst, int rcnt, int rwidth)
+{
+       return readl(spx5_inst_addr(iomem, gbase, ginst,
+                                    gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
+
+static inline void spx5_wr(u32 val, struct sparx5 *sparx5,
+                          int id, int tinst, int tcnt,
+                          int gbase, int ginst, int gcnt, int gwidth,
+                          int raddr, int rinst, int rcnt, int rwidth)
+{
+       writel(val, spx5_addr(sparx5->regs, id, tinst, tcnt,
+                             gbase, ginst, gcnt, gwidth,
+                             raddr, rinst, rcnt, rwidth));
+}
+
+static inline void spx5_inst_wr(u32 val, void __iomem *iomem,
+                               int id, int tinst, int tcnt,
+                               int gbase, int ginst, int gcnt, int gwidth,
+                               int raddr, int rinst, int rcnt, int rwidth)
+{
+       writel(val, spx5_inst_addr(iomem,
+                                  gbase, ginst, gcnt, gwidth,
+                                  raddr, rinst, rcnt, rwidth));
+}
+
+static inline void spx5_rmw(u32 val, u32 mask, struct sparx5 *sparx5,
+                           int id, int tinst, int tcnt,
+                           int gbase, int ginst, int gcnt, int gwidth,
+                           int raddr, int rinst, int rcnt, int rwidth)
+{
+       u32 nval;
+
+       nval = readl(spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
+                              gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+       nval = (nval & ~mask) | (val & mask);
+       writel(nval, spx5_addr(sparx5->regs, id, tinst, tcnt, gbase, ginst,
+                              gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
+
+static inline void spx5_inst_rmw(u32 val, u32 mask, void __iomem *iomem,
+                                int id, int tinst, int tcnt,
+                                int gbase, int ginst, int gcnt, int gwidth,
+                                int raddr, int rinst, int rcnt, int rwidth)
+{
+       u32 nval;
+
+       nval = readl(spx5_inst_addr(iomem, gbase, ginst, gcnt, gwidth, raddr,
+                                   rinst, rcnt, rwidth));
+       nval = (nval & ~mask) | (val & mask);
+       writel(nval, spx5_inst_addr(iomem, gbase, ginst, gcnt, gwidth, raddr,
+                                   rinst, rcnt, rwidth));
+}
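
A register macro from sparx5_main_regs.h expands to the full trailing argument list of these helpers, so a call site passes the macro name directly after the value/mask and context arguments. As an illustration (a sketch using ANA_L3_VLAN_CTRL and its VLAN_ENA field, both defined further below):

    /* Sketch: read-modify-write one bit, leaving the rest untouched */
    spx5_rmw(ANA_L3_VLAN_CTRL_VLAN_ENA_SET(1),  /* value */
             ANA_L3_VLAN_CTRL_VLAN_ENA,         /* mask */
             sparx5,
             ANA_L3_VLAN_CTRL);                 /* expands to 11 args */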
+
+static inline void __iomem *spx5_inst_get(struct sparx5 *sparx5, int id, int tinst)
+{
+       return sparx5->regs[id + tinst];
+}
+
+static inline void __iomem *spx5_reg_get(struct sparx5 *sparx5,
+                                        int id, int tinst, int tcnt,
+                                        int gbase, int ginst, int gcnt, int gwidth,
+                                        int raddr, int rinst, int rcnt, int rwidth)
+{
+       return spx5_addr(sparx5->regs, id, tinst, tcnt,
+                        gbase, ginst, gcnt, gwidth,
+                        raddr, rinst, rcnt, rwidth);
+}
+
+#endif /* __SPARX5_MAIN_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
new file mode 100644 (file)
index 0000000..5ab2373
--- /dev/null
@@ -0,0 +1,4642 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ * Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc.
+ */
+
+/* This file is autogenerated by cml-utils 2021-05-06 13:06:37 +0200.
+ * Commit ID: 9ae4ec441e25e4b9003f4e514df5cb12a36b84d3
+ */
+
+#ifndef _SPARX5_MAIN_REGS_H_
+#define _SPARX5_MAIN_REGS_H_
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+
+enum sparx5_target {
+       TARGET_ANA_AC = 1,
+       TARGET_ANA_ACL = 2,
+       TARGET_ANA_AC_POL = 4,
+       TARGET_ANA_CL = 6,
+       TARGET_ANA_L2 = 7,
+       TARGET_ANA_L3 = 8,
+       TARGET_ASM = 9,
+       TARGET_CLKGEN = 11,
+       TARGET_CPU = 12,
+       TARGET_DEV10G = 17,
+       TARGET_DEV25G = 29,
+       TARGET_DEV2G5 = 37,
+       TARGET_DEV5G = 102,
+       TARGET_DSM = 115,
+       TARGET_EACL = 116,
+       TARGET_FDMA = 117,
+       TARGET_GCB = 118,
+       TARGET_HSCH = 119,
+       TARGET_LRN = 122,
+       TARGET_PCEP = 129,
+       TARGET_PCS10G_BR = 132,
+       TARGET_PCS25G_BR = 144,
+       TARGET_PCS5G_BR = 160,
+       TARGET_PORT_CONF = 173,
+       TARGET_QFWD = 175,
+       TARGET_QRES = 176,
+       TARGET_QS = 177,
+       TARGET_QSYS = 178,
+       TARGET_REW = 179,
+       TARGET_VCAP_SUPER = 326,
+       TARGET_VOP = 327,
+       TARGET_XQS = 331,
+       NUM_TARGETS = 332
+};
+
+#define __REG(...)    __VA_ARGS__
+
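__REG() passes its arguments straight through, so every register macro below carries the eleven values expected by the spx5_addr()/spx5_rd()/spx5_wr()/spx5_rmw() helpers in sparx5_main.h, in the same order as their parameters: target id, target instance and count (tinst, tcnt), group base address, instance, count and width (gbase, ginst, gcnt, gwidth), then register address, instance, count and width (raddr, rinst, rcnt, rwidth). In ANA_AC_RAM_INIT below, for example, 839108 is the group base address and the final 4 the register width in bytes.
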
+/*      ANA_AC:RAM_CTRL:RAM_INIT */
+#define ANA_AC_RAM_INIT           __REG(TARGET_ANA_AC, 0, 1, 839108, 0, 1, 4, 0, 0, 1, 4)
+
+#define ANA_AC_RAM_INIT_RAM_INIT                 BIT(1)
+#define ANA_AC_RAM_INIT_RAM_INIT_SET(x)\
+       FIELD_PREP(ANA_AC_RAM_INIT_RAM_INIT, x)
+#define ANA_AC_RAM_INIT_RAM_INIT_GET(x)\
+       FIELD_GET(ANA_AC_RAM_INIT_RAM_INIT, x)
+
+#define ANA_AC_RAM_INIT_RAM_CFG_HOOK             BIT(0)
+#define ANA_AC_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+       FIELD_PREP(ANA_AC_RAM_INIT_RAM_CFG_HOOK, x)
+#define ANA_AC_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+       FIELD_GET(ANA_AC_RAM_INIT_RAM_CFG_HOOK, x)
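
Every field in this file follows the same pattern: the bare name is the bitmask, _SET(x) wraps FIELD_PREP() to place x into the field, and _GET(x) wraps FIELD_GET() to extract the field from a read-back value. Composing a write could look like this sketch (the chosen values are illustrative, not taken from the patch):

    /* Sketch: kick off RAM initialization with the config hook cleared */
    spx5_wr(ANA_AC_RAM_INIT_RAM_INIT_SET(1) |
            ANA_AC_RAM_INIT_RAM_CFG_HOOK_SET(0),
            sparx5, ANA_AC_RAM_INIT);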
+
+/*      ANA_AC:PS_COMMON:OWN_UPSID */
+#define ANA_AC_OWN_UPSID(r)       __REG(TARGET_ANA_AC, 0, 1, 894472, 0, 1, 352, 52, r, 3, 4)
+
+#define ANA_AC_OWN_UPSID_OWN_UPSID               GENMASK(4, 0)
+#define ANA_AC_OWN_UPSID_OWN_UPSID_SET(x)\
+       FIELD_PREP(ANA_AC_OWN_UPSID_OWN_UPSID, x)
+#define ANA_AC_OWN_UPSID_OWN_UPSID_GET(x)\
+       FIELD_GET(ANA_AC_OWN_UPSID_OWN_UPSID, x)
+
+/*      ANA_AC:SRC:SRC_CFG */
+#define ANA_AC_SRC_CFG(g)         __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 0, 0, 1, 4)
+
+/*      ANA_AC:SRC:SRC_CFG1 */
+#define ANA_AC_SRC_CFG1(g)        __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 4, 0, 1, 4)
+
+/*      ANA_AC:SRC:SRC_CFG2 */
+#define ANA_AC_SRC_CFG2(g)        __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 8, 0, 1, 4)
+
+#define ANA_AC_SRC_CFG2_PORT_MASK2               BIT(0)
+#define ANA_AC_SRC_CFG2_PORT_MASK2_SET(x)\
+       FIELD_PREP(ANA_AC_SRC_CFG2_PORT_MASK2, x)
+#define ANA_AC_SRC_CFG2_PORT_MASK2_GET(x)\
+       FIELD_GET(ANA_AC_SRC_CFG2_PORT_MASK2, x)
+
+/*      ANA_AC:PGID:PGID_CFG */
+#define ANA_AC_PGID_CFG(g)        __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 0, 0, 1, 4)
+
+/*      ANA_AC:PGID:PGID_CFG1 */
+#define ANA_AC_PGID_CFG1(g)       __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 4, 0, 1, 4)
+
+/*      ANA_AC:PGID:PGID_CFG2 */
+#define ANA_AC_PGID_CFG2(g)       __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 8, 0, 1, 4)
+
+#define ANA_AC_PGID_CFG2_PORT_MASK2              BIT(0)
+#define ANA_AC_PGID_CFG2_PORT_MASK2_SET(x)\
+       FIELD_PREP(ANA_AC_PGID_CFG2_PORT_MASK2, x)
+#define ANA_AC_PGID_CFG2_PORT_MASK2_GET(x)\
+       FIELD_GET(ANA_AC_PGID_CFG2_PORT_MASK2, x)
+
+/*      ANA_AC:PGID:PGID_MISC_CFG */
+#define ANA_AC_PGID_MISC_CFG(g)   __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 12, 0, 1, 4)
+
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU         GENMASK(6, 4)
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU_SET(x)\
+       FIELD_PREP(ANA_AC_PGID_MISC_CFG_PGID_CPU_QU, x)
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU_GET(x)\
+       FIELD_GET(ANA_AC_PGID_MISC_CFG_PGID_CPU_QU, x)
+
+#define ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA      BIT(1)
+#define ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA_SET(x)\
+       FIELD_PREP(ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA, x)
+#define ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA_GET(x)\
+       FIELD_GET(ANA_AC_PGID_MISC_CFG_STACK_TYPE_ENA, x)
+
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA   BIT(0)
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(x)\
+       FIELD_PREP(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, x)
+#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_GET(x)\
+       FIELD_GET(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, x)
+
+/*      ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_GLOBAL_EVENT_MASK */
+#define ANA_AC_PORT_SGE_CFG(r)    __REG(TARGET_ANA_AC, 0, 1, 851552, 0, 1, 20, 0, r, 4, 4)
+
+#define ANA_AC_PORT_SGE_CFG_MASK                 GENMASK(15, 0)
+#define ANA_AC_PORT_SGE_CFG_MASK_SET(x)\
+       FIELD_PREP(ANA_AC_PORT_SGE_CFG_MASK, x)
+#define ANA_AC_PORT_SGE_CFG_MASK_GET(x)\
+       FIELD_GET(ANA_AC_PORT_SGE_CFG_MASK, x)
+
+/*      ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_RESET */
+#define ANA_AC_STAT_RESET         __REG(TARGET_ANA_AC, 0, 1, 851552, 0, 1, 20, 16, 0, 1, 4)
+
+#define ANA_AC_STAT_RESET_RESET                  BIT(0)
+#define ANA_AC_STAT_RESET_RESET_SET(x)\
+       FIELD_PREP(ANA_AC_STAT_RESET_RESET, x)
+#define ANA_AC_STAT_RESET_RESET_GET(x)\
+       FIELD_GET(ANA_AC_STAT_RESET_RESET, x)
+
+/*      ANA_AC:STAT_CNT_CFG_PORT:STAT_CFG */
+#define ANA_AC_PORT_STAT_CFG(g, r) __REG(TARGET_ANA_AC, 0, 1, 843776, g, 70, 64, 4, r, 4, 4)
+
+#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK       GENMASK(11, 4)
+#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_SET(x)\
+       FIELD_PREP(ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK, x)
+#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_GET(x)\
+       FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK, x)
+
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE    GENMASK(3, 1)
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE_SET(x)\
+       FIELD_PREP(ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE, x)
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE_GET(x)\
+       FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_CNT_FRM_TYPE, x)
+
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE        BIT(0)
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_SET(x)\
+       FIELD_PREP(ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE, x)
+#define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_GET(x)\
+       FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE, x)
+
+/*      ANA_AC:STAT_CNT_CFG_PORT:STAT_LSB_CNT */
+#define ANA_AC_PORT_STAT_LSB_CNT(g, r) __REG(TARGET_ANA_AC, 0, 1, 843776, g, 70, 64, 20, r, 4, 4)
+
+/*      ANA_ACL:COMMON:OWN_UPSID */
+#define ANA_ACL_OWN_UPSID(r)      __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 580, r, 3, 4)
+
+#define ANA_ACL_OWN_UPSID_OWN_UPSID              GENMASK(4, 0)
+#define ANA_ACL_OWN_UPSID_OWN_UPSID_SET(x)\
+       FIELD_PREP(ANA_ACL_OWN_UPSID_OWN_UPSID, x)
+#define ANA_ACL_OWN_UPSID_OWN_UPSID_GET(x)\
+       FIELD_GET(ANA_ACL_OWN_UPSID_OWN_UPSID, x)
+
+/*      ANA_AC_POL:POL_ALL_CFG:POL_UPD_INT_CFG */
+#define ANA_AC_POL_POL_UPD_INT_CFG __REG(TARGET_ANA_AC_POL, 0, 1, 75968, 0, 1, 1160, 1148, 0, 1, 4)
+
+#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT   GENMASK(9, 0)
+#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_SET(x)\
+       FIELD_PREP(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT, x)
+#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_GET(x)\
+       FIELD_GET(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT, x)
+
+/*      ANA_AC_POL:COMMON_BDLB:DLB_CTRL */
+#define ANA_AC_POL_BDLB_DLB_CTRL  __REG(TARGET_ANA_AC_POL, 0, 1, 79048, 0, 1, 8, 0, 0, 1, 4)
+
+#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS GENMASK(26, 19)
+#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\
+       FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS, x)
+#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_GET(x)\
+       FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS, x)
+
+#define ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT   GENMASK(18, 4)
+#define ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT_SET(x)\
+       FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT, x)
+#define ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT_GET(x)\
+       FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_BASE_TICK_CNT, x)
+
+#define ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA        BIT(1)
+#define ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA_SET(x)\
+       FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA, x)
+#define ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA_GET(x)\
+       FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_LEAK_ENA, x)
+
+#define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA     BIT(0)
+#define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA_SET(x)\
+       FIELD_PREP(ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA, x)
+#define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\
+       FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA, x)
+
+/*      ANA_AC_POL:COMMON_BUM_SLB:DLB_CTRL */
+#define ANA_AC_POL_SLB_DLB_CTRL   __REG(TARGET_ANA_AC_POL, 0, 1, 79056, 0, 1, 20, 0, 0, 1, 4)
+
+#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS  GENMASK(26, 19)
+#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\
+       FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS, x)
+#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_GET(x)\
+       FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS, x)
+
+#define ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT    GENMASK(18, 4)
+#define ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT_SET(x)\
+       FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT, x)
+#define ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT_GET(x)\
+       FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_BASE_TICK_CNT, x)
+
+#define ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA         BIT(1)
+#define ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA_SET(x)\
+       FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA, x)
+#define ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA_GET(x)\
+       FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_LEAK_ENA, x)
+
+#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA      BIT(0)
+#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA_SET(x)\
+       FIELD_PREP(ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA, x)
+#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\
+       FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA, x)
+
+/*      ANA_CL:PORT:FILTER_CTRL */
+#define ANA_CL_FILTER_CTRL(g)     __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 4, 0, 1, 4)
+
+#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS    BIT(2)
+#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS, x)
+#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_GET(x)\
+       FIELD_GET(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS, x)
+
+#define ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS   BIT(1)
+#define ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS, x)
+#define ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS_GET(x)\
+       FIELD_GET(ANA_CL_FILTER_CTRL_FILTER_NULL_MAC_DIS, x)
+
+#define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA  BIT(0)
+#define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_SET(x)\
+       FIELD_PREP(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA, x)
+#define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_GET(x)\
+       FIELD_GET(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA, x)
+
+/*      ANA_CL:PORT:VLAN_FILTER_CTRL */
+#define ANA_CL_VLAN_FILTER_CTRL(g, r) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 8, r, 3, 4)
+
+#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA BIT(10)
+#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA, x)
+#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS    BIT(9)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS         BIT(8)
+#define ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CTAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS    BIT(7)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS BIT(6)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST1_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS BIT(5)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST2_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS BIT(4)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_PRIO_CUST3_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_STAG_DIS         BIT(3)
+#define ANA_CL_VLAN_FILTER_CTRL_STAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_STAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS   BIT(2)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST1_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS   BIT(1)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST2_STAG_DIS, x)
+
+#define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS   BIT(0)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS, x)
+#define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS, x)
+
+/*      ANA_CL:PORT:ETAG_FILTER_CTRL */
+#define ANA_CL_ETAG_FILTER_CTRL(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 20, 0, 1, 4)
+
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA BIT(1)
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA_SET(x)\
+       FIELD_PREP(ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA, x)
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA_GET(x)\
+       FIELD_GET(ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA, x)
+
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS         BIT(0)
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS, x)
+#define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS_GET(x)\
+       FIELD_GET(ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS, x)
+
+/*      ANA_CL:PORT:VLAN_CTRL */
+#define ANA_CL_VLAN_CTRL(g)       __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 32, 0, 1, 4)
+
+#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS GENMASK(30, 26)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS, x)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP    GENMASK(25, 23)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP, x)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_PCP, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI    BIT(22)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI, x)
+#define ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VOE_DEFAULT_DEI, x)
+
+#define ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA  BIT(21)
+#define ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA, x)
+#define ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_PCP_DEI_TRANS_ENA, x)
+
+#define ANA_CL_VLAN_CTRL_VLAN_TAG_SEL            BIT(20)
+#define ANA_CL_VLAN_CTRL_VLAN_TAG_SEL_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_TAG_SEL, x)
+#define ANA_CL_VLAN_CTRL_VLAN_TAG_SEL_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_TAG_SEL, x)
+
+#define ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA          BIT(19)
+#define ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA, x)
+#define ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA, x)
+
+#define ANA_CL_VLAN_CTRL_VLAN_POP_CNT            GENMASK(18, 17)
+#define ANA_CL_VLAN_CTRL_VLAN_POP_CNT_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_VLAN_POP_CNT, x)
+#define ANA_CL_VLAN_CTRL_VLAN_POP_CNT_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_VLAN_POP_CNT, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_TAG_TYPE           BIT(16)
+#define ANA_CL_VLAN_CTRL_PORT_TAG_TYPE_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_TAG_TYPE, x)
+#define ANA_CL_VLAN_CTRL_PORT_TAG_TYPE_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_PORT_TAG_TYPE, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_PCP                GENMASK(15, 13)
+#define ANA_CL_VLAN_CTRL_PORT_PCP_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_PCP, x)
+#define ANA_CL_VLAN_CTRL_PORT_PCP_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_PORT_PCP, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_DEI                BIT(12)
+#define ANA_CL_VLAN_CTRL_PORT_DEI_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_DEI, x)
+#define ANA_CL_VLAN_CTRL_PORT_DEI_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_PORT_DEI, x)
+
+#define ANA_CL_VLAN_CTRL_PORT_VID                GENMASK(11, 0)
+#define ANA_CL_VLAN_CTRL_PORT_VID_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_PORT_VID, x)
+#define ANA_CL_VLAN_CTRL_PORT_VID_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VID, x)
+
+/*      ANA_CL:PORT:VLAN_CTRL_2 */
+#define ANA_CL_VLAN_CTRL_2(g)     __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 36, 0, 1, 4)
+
+#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT         GENMASK(1, 0)
+#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_SET(x)\
+       FIELD_PREP(ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT, x)
+#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_GET(x)\
+       FIELD_GET(ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT, x)
+
+/*      ANA_CL:PORT:CAPTURE_BPDU_CFG */
+#define ANA_CL_CAPTURE_BPDU_CFG(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 196, 0, 1, 4)
+
+/*      ANA_CL:COMMON:OWN_UPSID */
+#define ANA_CL_OWN_UPSID(r)       __REG(TARGET_ANA_CL, 0, 1, 166912, 0, 1, 756, 0, r, 3, 4)
+
+#define ANA_CL_OWN_UPSID_OWN_UPSID               GENMASK(4, 0)
+#define ANA_CL_OWN_UPSID_OWN_UPSID_SET(x)\
+       FIELD_PREP(ANA_CL_OWN_UPSID_OWN_UPSID, x)
+#define ANA_CL_OWN_UPSID_OWN_UPSID_GET(x)\
+       FIELD_GET(ANA_CL_OWN_UPSID_OWN_UPSID, x)
+
+/*      ANA_L2:COMMON:AUTO_LRN_CFG */
+#define ANA_L2_AUTO_LRN_CFG       __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 24, 0, 1, 4)
+
+/*      ANA_L2:COMMON:AUTO_LRN_CFG1 */
+#define ANA_L2_AUTO_LRN_CFG1      __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 28, 0, 1, 4)
+
+/*      ANA_L2:COMMON:AUTO_LRN_CFG2 */
+#define ANA_L2_AUTO_LRN_CFG2      __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 32, 0, 1, 4)
+
+#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2       BIT(0)
+#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2_SET(x)\
+       FIELD_PREP(ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2, x)
+#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2_GET(x)\
+       FIELD_GET(ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2, x)
+
+/*      ANA_L2:COMMON:OWN_UPSID */
+#define ANA_L2_OWN_UPSID(r)       __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 672, r, 3, 4)
+
+#define ANA_L2_OWN_UPSID_OWN_UPSID               GENMASK(4, 0)
+#define ANA_L2_OWN_UPSID_OWN_UPSID_SET(x)\
+       FIELD_PREP(ANA_L2_OWN_UPSID_OWN_UPSID, x)
+#define ANA_L2_OWN_UPSID_OWN_UPSID_GET(x)\
+       FIELD_GET(ANA_L2_OWN_UPSID_OWN_UPSID, x)
+
+/*      ANA_L3:COMMON:VLAN_CTRL */
+#define ANA_L3_VLAN_CTRL          __REG(TARGET_ANA_L3, 0, 1, 493632, 0, 1, 184, 4, 0, 1, 4)
+
+#define ANA_L3_VLAN_CTRL_VLAN_ENA                BIT(0)
+#define ANA_L3_VLAN_CTRL_VLAN_ENA_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_CTRL_VLAN_ENA, x)
+#define ANA_L3_VLAN_CTRL_VLAN_ENA_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_CTRL_VLAN_ENA, x)
+
+/*      ANA_L3:VLAN:VLAN_CFG */
+#define ANA_L3_VLAN_CFG(g)        __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 8, 0, 1, 4)
+
+#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR            GENMASK(30, 24)
+#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_MSTP_PTR, x)
+#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_CFG_VLAN_MSTP_PTR, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_FID                 GENMASK(20, 8)
+#define ANA_L3_VLAN_CFG_VLAN_FID_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_FID, x)
+#define ANA_L3_VLAN_CFG_VLAN_FID_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_CFG_VLAN_FID, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA      BIT(6)
+#define ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA, x)
+#define ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_CFG_VLAN_IGR_FILTER_ENA, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA         BIT(5)
+#define ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA, x)
+#define ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_CFG_VLAN_SEC_FWD_ENA, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS           BIT(4)
+#define ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS, x)
+#define ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_CFG_VLAN_FLOOD_DIS, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_LRN_DIS             BIT(3)
+#define ANA_L3_VLAN_CFG_VLAN_LRN_DIS_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_LRN_DIS, x)
+#define ANA_L3_VLAN_CFG_VLAN_LRN_DIS_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_CFG_VLAN_LRN_DIS, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_RLEG_ENA            BIT(2)
+#define ANA_L3_VLAN_CFG_VLAN_RLEG_ENA_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_RLEG_ENA, x)
+#define ANA_L3_VLAN_CFG_VLAN_RLEG_ENA_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_CFG_VLAN_RLEG_ENA, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA         BIT(1)
+#define ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA, x)
+#define ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_CFG_VLAN_PRIVATE_ENA, x)
+
+#define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA          BIT(0)
+#define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA, x)
+#define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA, x)
+
+/*      ANA_L3:VLAN:VLAN_MASK_CFG */
+#define ANA_L3_VLAN_MASK_CFG(g)   __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 16, 0, 1, 4)
+
+/*      ANA_L3:VLAN:VLAN_MASK_CFG1 */
+#define ANA_L3_VLAN_MASK_CFG1(g)  __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 20, 0, 1, 4)
+
+/*      ANA_L3:VLAN:VLAN_MASK_CFG2 */
+#define ANA_L3_VLAN_MASK_CFG2(g)  __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 24, 0, 1, 4)
+
+#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2    BIT(0)
+#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2_SET(x)\
+       FIELD_PREP(ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2, x)
+#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2_GET(x)\
+       FIELD_GET(ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2, x)
+
+/*      ASM:DEV_STATISTICS:RX_IN_BYTES_CNT */
+#define ASM_RX_IN_BYTES_CNT(g)    __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 0, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_SYMBOL_ERR_CNT */
+#define ASM_RX_SYMBOL_ERR_CNT(g)  __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 4, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_PAUSE_CNT */
+#define ASM_RX_PAUSE_CNT(g)       __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 8, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_UNSUP_OPCODE_CNT */
+#define ASM_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 12, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_OK_BYTES_CNT */
+#define ASM_RX_OK_BYTES_CNT(g)    __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 16, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_BAD_BYTES_CNT */
+#define ASM_RX_BAD_BYTES_CNT(g)   __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 20, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_UC_CNT */
+#define ASM_RX_UC_CNT(g)          __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 24, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_MC_CNT */
+#define ASM_RX_MC_CNT(g)          __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 28, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_BC_CNT */
+#define ASM_RX_BC_CNT(g)          __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 32, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_CRC_ERR_CNT */
+#define ASM_RX_CRC_ERR_CNT(g)     __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 36, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_UNDERSIZE_CNT */
+#define ASM_RX_UNDERSIZE_CNT(g)   __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 40, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_FRAGMENTS_CNT */
+#define ASM_RX_FRAGMENTS_CNT(g)   __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 44, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_IN_RANGE_LEN_ERR_CNT */
+#define ASM_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 48, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 52, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_OVERSIZE_CNT */
+#define ASM_RX_OVERSIZE_CNT(g)    __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 56, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_JABBERS_CNT */
+#define ASM_RX_JABBERS_CNT(g)     __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 60, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_SIZE64_CNT */
+#define ASM_RX_SIZE64_CNT(g)      __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 64, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_SIZE65TO127_CNT */
+#define ASM_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 68, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_SIZE128TO255_CNT */
+#define ASM_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 72, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_SIZE256TO511_CNT */
+#define ASM_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 76, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_SIZE512TO1023_CNT */
+#define ASM_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 80, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_SIZE1024TO1518_CNT */
+#define ASM_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 84, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_SIZE1519TOMAX_CNT */
+#define ASM_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 88, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_IPG_SHRINK_CNT */
+#define ASM_RX_IPG_SHRINK_CNT(g)  __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 92, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_OUT_BYTES_CNT */
+#define ASM_TX_OUT_BYTES_CNT(g)   __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 96, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_PAUSE_CNT */
+#define ASM_TX_PAUSE_CNT(g)       __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 100, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_OK_BYTES_CNT */
+#define ASM_TX_OK_BYTES_CNT(g)    __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 104, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_UC_CNT */
+#define ASM_TX_UC_CNT(g)          __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 108, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_MC_CNT */
+#define ASM_TX_MC_CNT(g)          __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 112, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_BC_CNT */
+#define ASM_TX_BC_CNT(g)          __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 116, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_SIZE64_CNT */
+#define ASM_TX_SIZE64_CNT(g)      __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 120, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_SIZE65TO127_CNT */
+#define ASM_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 124, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_SIZE128TO255_CNT */
+#define ASM_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 128, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_SIZE256TO511_CNT */
+#define ASM_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 132, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_SIZE512TO1023_CNT */
+#define ASM_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 136, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_SIZE1024TO1518_CNT */
+#define ASM_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 140, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_SIZE1519TOMAX_CNT */
+#define ASM_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 144, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_ALIGNMENT_LOST_CNT */
+#define ASM_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 148, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_TAGGED_FRMS_CNT */
+#define ASM_RX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 152, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_UNTAGGED_FRMS_CNT */
+#define ASM_RX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 156, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_TAGGED_FRMS_CNT */
+#define ASM_TX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 160, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_UNTAGGED_FRMS_CNT */
+#define ASM_TX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 164, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_SYMBOL_ERR_CNT */
+#define ASM_PMAC_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 168, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_PAUSE_CNT */
+#define ASM_PMAC_RX_PAUSE_CNT(g)  __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 172, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_UNSUP_OPCODE_CNT */
+#define ASM_PMAC_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 176, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_CNT */
+#define ASM_PMAC_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 180, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_CNT */
+#define ASM_PMAC_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 184, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_UC_CNT */
+#define ASM_PMAC_RX_UC_CNT(g)     __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 188, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_MC_CNT */
+#define ASM_PMAC_RX_MC_CNT(g)     __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 192, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_BC_CNT */
+#define ASM_PMAC_RX_BC_CNT(g)     __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 196, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_CRC_ERR_CNT */
+#define ASM_PMAC_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 200, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_UNDERSIZE_CNT */
+#define ASM_PMAC_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 204, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_FRAGMENTS_CNT */
+#define ASM_PMAC_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 208, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
+#define ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 212, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 216, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_OVERSIZE_CNT */
+#define ASM_PMAC_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 220, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_JABBERS_CNT */
+#define ASM_PMAC_RX_JABBERS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 224, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_SIZE64_CNT */
+#define ASM_PMAC_RX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 228, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_SIZE65TO127_CNT */
+#define ASM_PMAC_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 232, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_SIZE128TO255_CNT */
+#define ASM_PMAC_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 236, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_SIZE256TO511_CNT */
+#define ASM_PMAC_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 240, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_SIZE512TO1023_CNT */
+#define ASM_PMAC_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 244, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_SIZE1024TO1518_CNT */
+#define ASM_PMAC_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 248, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_SIZE1519TOMAX_CNT */
+#define ASM_PMAC_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 252, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_PAUSE_CNT */
+#define ASM_PMAC_TX_PAUSE_CNT(g)  __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 256, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_CNT */
+#define ASM_PMAC_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 260, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_UC_CNT */
+#define ASM_PMAC_TX_UC_CNT(g)     __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 264, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_MC_CNT */
+#define ASM_PMAC_TX_MC_CNT(g)     __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 268, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_BC_CNT */
+#define ASM_PMAC_TX_BC_CNT(g)     __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 272, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_SIZE64_CNT */
+#define ASM_PMAC_TX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 276, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_SIZE65TO127_CNT */
+#define ASM_PMAC_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 280, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_SIZE128TO255_CNT */
+#define ASM_PMAC_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 284, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_SIZE256TO511_CNT */
+#define ASM_PMAC_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 288, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_SIZE512TO1023_CNT */
+#define ASM_PMAC_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 292, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_SIZE1024TO1518_CNT */
+#define ASM_PMAC_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 296, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_SIZE1519TOMAX_CNT */
+#define ASM_PMAC_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 300, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_ALIGNMENT_LOST_CNT */
+#define ASM_PMAC_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 304, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_ERR_CNT */
+#define ASM_MM_RX_ASSEMBLY_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 308, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:MM_RX_SMD_ERR_CNT */
+#define ASM_MM_RX_SMD_ERR_CNT(g)  __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 312, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_OK_CNT */
+#define ASM_MM_RX_ASSEMBLY_OK_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 316, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:MM_RX_MERGE_FRAG_CNT */
+#define ASM_MM_RX_MERGE_FRAG_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 320, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:MM_TX_PFRAGMENT_CNT */
+#define ASM_MM_TX_PFRAGMENT_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 324, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_MULTI_COLL_CNT */
+#define ASM_TX_MULTI_COLL_CNT(g)  __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 328, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_LATE_COLL_CNT */
+#define ASM_TX_LATE_COLL_CNT(g)   __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 332, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_XCOLL_CNT */
+#define ASM_TX_XCOLL_CNT(g)       __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 336, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_DEFER_CNT */
+#define ASM_TX_DEFER_CNT(g)       __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 340, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_XDEFER_CNT */
+#define ASM_TX_XDEFER_CNT(g)      __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 344, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_BACKOFF1_CNT */
+#define ASM_TX_BACKOFF1_CNT(g)    __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 348, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:TX_CSENSE_CNT */
+#define ASM_TX_CSENSE_CNT(g)      __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 352, 0, 1, 4)
+
+/*      ASM:DEV_STATISTICS:RX_IN_BYTES_MSB_CNT */
+#define ASM_RX_IN_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 356, 0, 1, 4)
+
+#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
+#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
+
+/*      ASM:DEV_STATISTICS:RX_OK_BYTES_MSB_CNT */
+#define ASM_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 360, 0, 1, 4)
+
+#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
+#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_MSB_CNT */
+#define ASM_PMAC_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 364, 0, 1, 4)
+
+#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
+#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
+
+/*      ASM:DEV_STATISTICS:RX_BAD_BYTES_MSB_CNT */
+#define ASM_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 368, 0, 1, 4)
+
+#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
+#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
+
+/*      ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_MSB_CNT */
+#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 372, 0, 1, 4)
+
+#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
+#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
+
+/*      ASM:DEV_STATISTICS:TX_OUT_BYTES_MSB_CNT */
+#define ASM_TX_OUT_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 376, 0, 1, 4)
+
+#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
+#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
+
+/*      ASM:DEV_STATISTICS:TX_OK_BYTES_MSB_CNT */
+#define ASM_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 380, 0, 1, 4)
+
+#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
+#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
+
+/*      ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_MSB_CNT */
+#define ASM_PMAC_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 384, 0, 1, 4)
+
+#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(3, 0)
+#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
+#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
+
+/*      ASM:DEV_STATISTICS:RX_SYNC_LOST_ERR_CNT */
+#define ASM_RX_SYNC_LOST_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 388, 0, 1, 4)
+
+/*      ASM:CFG:STAT_CFG */
+#define ASM_STAT_CFG              __REG(TARGET_ASM, 0, 1, 33280, 0, 1, 1088, 0, 0, 1, 4)
+
+#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT           BIT(0)
+#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT_SET(x)\
+       FIELD_PREP(ASM_STAT_CFG_STAT_CNT_CLR_SHOT, x)
+#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT_GET(x)\
+       FIELD_GET(ASM_STAT_CFG_STAT_CNT_CLR_SHOT, x)
+
+/*      ASM:CFG:PORT_CFG */
+#define ASM_PORT_CFG(r)           __REG(TARGET_ASM, 0, 1, 33280, 0, 1, 1088, 540, r, 67, 4)
+
+#define ASM_PORT_CFG_CSC_STAT_DIS                BIT(12)
+#define ASM_PORT_CFG_CSC_STAT_DIS_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_CSC_STAT_DIS, x)
+#define ASM_PORT_CFG_CSC_STAT_DIS_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_CSC_STAT_DIS, x)
+
+#define ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA      BIT(11)
+#define ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA, x)
+#define ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_HIH_AFTER_PREAMBLE_ENA, x)
+
+#define ASM_PORT_CFG_IGN_TAXI_ABORT_ENA          BIT(10)
+#define ASM_PORT_CFG_IGN_TAXI_ABORT_ENA_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_IGN_TAXI_ABORT_ENA, x)
+#define ASM_PORT_CFG_IGN_TAXI_ABORT_ENA_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_IGN_TAXI_ABORT_ENA, x)
+
+#define ASM_PORT_CFG_NO_PREAMBLE_ENA             BIT(9)
+#define ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_NO_PREAMBLE_ENA, x)
+#define ASM_PORT_CFG_NO_PREAMBLE_ENA_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_NO_PREAMBLE_ENA, x)
+
+#define ASM_PORT_CFG_SKIP_PREAMBLE_ENA           BIT(8)
+#define ASM_PORT_CFG_SKIP_PREAMBLE_ENA_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_SKIP_PREAMBLE_ENA, x)
+#define ASM_PORT_CFG_SKIP_PREAMBLE_ENA_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_SKIP_PREAMBLE_ENA, x)
+
+#define ASM_PORT_CFG_FRM_AGING_DIS               BIT(7)
+#define ASM_PORT_CFG_FRM_AGING_DIS_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_FRM_AGING_DIS, x)
+#define ASM_PORT_CFG_FRM_AGING_DIS_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_FRM_AGING_DIS, x)
+
+#define ASM_PORT_CFG_PAD_ENA                     BIT(6)
+#define ASM_PORT_CFG_PAD_ENA_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_PAD_ENA, x)
+#define ASM_PORT_CFG_PAD_ENA_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_PAD_ENA, x)
+
+#define ASM_PORT_CFG_INJ_DISCARD_CFG             GENMASK(5, 4)
+#define ASM_PORT_CFG_INJ_DISCARD_CFG_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_INJ_DISCARD_CFG, x)
+#define ASM_PORT_CFG_INJ_DISCARD_CFG_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_INJ_DISCARD_CFG, x)
+
+#define ASM_PORT_CFG_INJ_FORMAT_CFG              GENMASK(3, 2)
+#define ASM_PORT_CFG_INJ_FORMAT_CFG_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_INJ_FORMAT_CFG, x)
+#define ASM_PORT_CFG_INJ_FORMAT_CFG_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_INJ_FORMAT_CFG, x)
+
+#define ASM_PORT_CFG_VSTAX2_AWR_ENA              BIT(1)
+#define ASM_PORT_CFG_VSTAX2_AWR_ENA_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_VSTAX2_AWR_ENA, x)
+#define ASM_PORT_CFG_VSTAX2_AWR_ENA_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_VSTAX2_AWR_ENA, x)
+
+#define ASM_PORT_CFG_PFRM_FLUSH                  BIT(0)
+#define ASM_PORT_CFG_PFRM_FLUSH_SET(x)\
+       FIELD_PREP(ASM_PORT_CFG_PFRM_FLUSH, x)
+#define ASM_PORT_CFG_PFRM_FLUSH_GET(x)\
+       FIELD_GET(ASM_PORT_CFG_PFRM_FLUSH, x)
+
+/*      ASM:RAM_CTRL:RAM_INIT */
+#define ASM_RAM_INIT              __REG(TARGET_ASM, 0, 1, 34832, 0, 1, 4, 0, 0, 1, 4)
+
+#define ASM_RAM_INIT_RAM_INIT                    BIT(1)
+#define ASM_RAM_INIT_RAM_INIT_SET(x)\
+       FIELD_PREP(ASM_RAM_INIT_RAM_INIT, x)
+#define ASM_RAM_INIT_RAM_INIT_GET(x)\
+       FIELD_GET(ASM_RAM_INIT_RAM_INIT, x)
+
+#define ASM_RAM_INIT_RAM_CFG_HOOK                BIT(0)
+#define ASM_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+       FIELD_PREP(ASM_RAM_INIT_RAM_CFG_HOOK, x)
+#define ASM_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+       FIELD_GET(ASM_RAM_INIT_RAM_CFG_HOOK, x)
+
+/*      CLKGEN:LCPLL1:LCPLL1_CORE_CLK_CFG */
+#define CLKGEN_LCPLL1_CORE_CLK_CFG __REG(TARGET_CLKGEN, 0, 1, 12, 0, 1, 36, 0, 0, 1, 4)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV  GENMASK(7, 0)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(x)\
+       FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_GET(x)\
+       FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV, x)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV  GENMASK(10, 8)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_SET(x)\
+       FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_GET(x)\
+       FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV, x)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR  BIT(11)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_SET(x)\
+       FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_GET(x)\
+       FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR, x)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL  GENMASK(13, 12)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_SET(x)\
+       FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_GET(x)\
+       FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL, x)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA  BIT(14)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_SET(x)\
+       FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_GET(x)\
+       FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA, x)
+
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA  BIT(15)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_SET(x)\
+       FIELD_PREP(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, x)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_GET(x)\
+       FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, x)
+
+/*      CPU:CPU_REGS:PROC_CTRL */
+#define CPU_PROC_CTRL             __REG(TARGET_CPU, 0, 1, 0, 0, 1, 204, 176, 0, 1, 4)
+
+#define CPU_PROC_CTRL_AARCH64_MODE_ENA           BIT(12)
+#define CPU_PROC_CTRL_AARCH64_MODE_ENA_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_AARCH64_MODE_ENA, x)
+#define CPU_PROC_CTRL_AARCH64_MODE_ENA_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_AARCH64_MODE_ENA, x)
+
+#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS      BIT(11)
+#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x)
+#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x)
+
+#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS      BIT(10)
+#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x)
+#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x)
+
+#define CPU_PROC_CTRL_BE_EXCEP_MODE              BIT(9)
+#define CPU_PROC_CTRL_BE_EXCEP_MODE_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_BE_EXCEP_MODE, x)
+#define CPU_PROC_CTRL_BE_EXCEP_MODE_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_BE_EXCEP_MODE, x)
+
+#define CPU_PROC_CTRL_VINITHI                    BIT(8)
+#define CPU_PROC_CTRL_VINITHI_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_VINITHI, x)
+#define CPU_PROC_CTRL_VINITHI_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_VINITHI, x)
+
+#define CPU_PROC_CTRL_CFGTE                      BIT(7)
+#define CPU_PROC_CTRL_CFGTE_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_CFGTE, x)
+#define CPU_PROC_CTRL_CFGTE_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_CFGTE, x)
+
+#define CPU_PROC_CTRL_CP15S_DISABLE              BIT(6)
+#define CPU_PROC_CTRL_CP15S_DISABLE_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_CP15S_DISABLE, x)
+#define CPU_PROC_CTRL_CP15S_DISABLE_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_CP15S_DISABLE, x)
+
+#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE        BIT(5)
+#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x)
+#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x)
+
+#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA        BIT(4)
+#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA, x)
+#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA, x)
+
+#define CPU_PROC_CTRL_ACP_AWCACHE                BIT(3)
+#define CPU_PROC_CTRL_ACP_AWCACHE_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_ACP_AWCACHE, x)
+#define CPU_PROC_CTRL_ACP_AWCACHE_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_ACP_AWCACHE, x)
+
+#define CPU_PROC_CTRL_ACP_ARCACHE                BIT(2)
+#define CPU_PROC_CTRL_ACP_ARCACHE_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_ACP_ARCACHE, x)
+#define CPU_PROC_CTRL_ACP_ARCACHE_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_ACP_ARCACHE, x)
+
+#define CPU_PROC_CTRL_L2_FLUSH_REQ               BIT(1)
+#define CPU_PROC_CTRL_L2_FLUSH_REQ_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_L2_FLUSH_REQ, x)
+#define CPU_PROC_CTRL_L2_FLUSH_REQ_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_L2_FLUSH_REQ, x)
+
+#define CPU_PROC_CTRL_ACP_DISABLE                BIT(0)
+#define CPU_PROC_CTRL_ACP_DISABLE_SET(x)\
+       FIELD_PREP(CPU_PROC_CTRL_ACP_DISABLE, x)
+#define CPU_PROC_CTRL_ACP_DISABLE_GET(x)\
+       FIELD_GET(CPU_PROC_CTRL_ACP_DISABLE, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV10G_MAC_ENA_CFG(t)     __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 0, 0, 1, 4)
+
+#define DEV10G_MAC_ENA_CFG_RX_ENA                BIT(4)
+#define DEV10G_MAC_ENA_CFG_RX_ENA_SET(x)\
+       FIELD_PREP(DEV10G_MAC_ENA_CFG_RX_ENA, x)
+#define DEV10G_MAC_ENA_CFG_RX_ENA_GET(x)\
+       FIELD_GET(DEV10G_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV10G_MAC_ENA_CFG_TX_ENA                BIT(0)
+#define DEV10G_MAC_ENA_CFG_TX_ENA_SET(x)\
+       FIELD_PREP(DEV10G_MAC_ENA_CFG_TX_ENA, x)
+#define DEV10G_MAC_ENA_CFG_TX_ENA_GET(x)\
+       FIELD_GET(DEV10G_MAC_ENA_CFG_TX_ENA, x)
+
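The field macros above all follow one pattern: a mask built with BIT()/GENMASK(), a _SET() helper wrapping FIELD_PREP() and a _GET() helper wrapping FIELD_GET(), both from <linux/bitfield.h>. A minimal sketch of how a caller would use them, assuming an already-mapped register pointer (the readl()/writel() plumbing below is illustrative, not part of this patch):

	#include <linux/bitfield.h>
	#include <linux/io.h>

	/* Enable RX and TX on a DEV10G MAC via read-modify-write.
	 * ena_cfg is assumed to point at the mapped MAC_ENA_CFG word.
	 */
	static void dev10g_mac_enable(void __iomem *ena_cfg)
	{
		u32 val = readl(ena_cfg);

		val &= ~(DEV10G_MAC_ENA_CFG_RX_ENA | DEV10G_MAC_ENA_CFG_TX_ENA);
		val |= DEV10G_MAC_ENA_CFG_RX_ENA_SET(1) |
		       DEV10G_MAC_ENA_CFG_TX_ENA_SET(1);
		writel(val, ena_cfg);
	}
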
+/*      DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV10G_MAC_MAXLEN_CFG(t)  __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 8, 0, 1, 4)
+
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK    BIT(16)
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
+       FIELD_PREP(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_GET(x)\
+       FIELD_GET(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN            GENMASK(15, 0)
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+       FIELD_PREP(DEV10G_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+       FIELD_GET(DEV10G_MAC_MAXLEN_CFG_MAX_LEN, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_NUM_TAGS_CFG */
+#define DEV10G_MAC_NUM_TAGS_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 12, 0, 1, 4)
+
+#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS         GENMASK(1, 0)
+#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(x)\
+       FIELD_PREP(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS, x)
+#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_GET(x)\
+       FIELD_GET(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEV10G_MAC_TAGS_CFG(t, r) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 16, r, 3, 4)
+
+#define DEV10G_MAC_TAGS_CFG_TAG_ID               GENMASK(31, 16)
+#define DEV10G_MAC_TAGS_CFG_TAG_ID_SET(x)\
+       FIELD_PREP(DEV10G_MAC_TAGS_CFG_TAG_ID, x)
+#define DEV10G_MAC_TAGS_CFG_TAG_ID_GET(x)\
+       FIELD_GET(DEV10G_MAC_TAGS_CFG_TAG_ID, x)
+
+#define DEV10G_MAC_TAGS_CFG_TAG_ENA              BIT(4)
+#define DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(x)\
+       FIELD_PREP(DEV10G_MAC_TAGS_CFG_TAG_ENA, x)
+#define DEV10G_MAC_TAGS_CFG_TAG_ENA_GET(x)\
+       FIELD_GET(DEV10G_MAC_TAGS_CFG_TAG_ENA, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV10G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 28, 0, 1, 4)
+
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA   BIT(24)
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_GET(x)\
+       FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA   BIT(20)
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_GET(x)\
+       FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA       BIT(16)
+#define DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_GET(x)\
+       FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS   BIT(12)
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_SET(x)\
+       FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_GET(x)\
+       FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA       BIT(8)
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_GET(x)\
+       FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA       BIT(4)
+#define DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_SET(x)\
+       FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_GET(x)\
+       FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+
+#define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA       BIT(0)
+#define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA_SET(x)\
+       FIELD_PREP(DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+#define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\
+       FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_TX_MONITOR_STICKY */
+#define DEV10G_MAC_TX_MONITOR_STICKY(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 48, 0, 1, 4)
+
+#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY BIT(4)
+#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY_SET(x)\
+       FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY, x)
+#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY_GET(x)\
+       FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY, x)
+
+#define DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY BIT(3)
+#define DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY_SET(x)\
+       FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY, x)
+#define DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY_GET(x)\
+       FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_REMOTE_ERR_STATE_STICKY, x)
+
+#define DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY BIT(2)
+#define DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY_SET(x)\
+       FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY, x)
+#define DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY_GET(x)\
+       FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_LINK_INTERRUPTION_STATE_STICKY, x)
+
+#define DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY BIT(1)
+#define DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY_SET(x)\
+       FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY, x)
+#define DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY_GET(x)\
+       FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY, x)
+
+#define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY BIT(0)
+#define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY_SET(x)\
+       FIELD_PREP(DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY, x)
+#define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY_GET(x)\
+       FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY, x)
+
+/*      DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV10G_DEV_RST_CTRL(t)    __REG(TARGET_DEV10G, t, 12, 436, 0, 1, 52, 0, 0, 1, 4)
+
+#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA      BIT(28)
+#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
+       FIELD_PREP(DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA_GET(x)\
+       FIELD_GET(DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+
+#define DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(27)
+#define DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\
+       FIELD_PREP(DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+#define DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\
+       FIELD_GET(DEV10G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+
+#define DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS GENMASK(26, 25)
+#define DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_SET(x)\
+       FIELD_PREP(DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+#define DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_GET(x)\
+       FIELD_GET(DEV10G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+
+#define DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL     GENMASK(24, 23)
+#define DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL_SET(x)\
+       FIELD_PREP(DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+#define DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL_GET(x)\
+       FIELD_GET(DEV10G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+
+#define DEV10G_DEV_RST_CTRL_SPEED_SEL            GENMASK(22, 20)
+#define DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+       FIELD_PREP(DEV10G_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEV10G_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+       FIELD_GET(DEV10G_DEV_RST_CTRL_SPEED_SEL, x)
+
+#define DEV10G_DEV_RST_CTRL_PCS_TX_RST           BIT(12)
+#define DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(x)\
+       FIELD_PREP(DEV10G_DEV_RST_CTRL_PCS_TX_RST, x)
+#define DEV10G_DEV_RST_CTRL_PCS_TX_RST_GET(x)\
+       FIELD_GET(DEV10G_DEV_RST_CTRL_PCS_TX_RST, x)
+
+#define DEV10G_DEV_RST_CTRL_PCS_RX_RST           BIT(8)
+#define DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(x)\
+       FIELD_PREP(DEV10G_DEV_RST_CTRL_PCS_RX_RST, x)
+#define DEV10G_DEV_RST_CTRL_PCS_RX_RST_GET(x)\
+       FIELD_GET(DEV10G_DEV_RST_CTRL_PCS_RX_RST, x)
+
+#define DEV10G_DEV_RST_CTRL_MAC_TX_RST           BIT(4)
+#define DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(x)\
+       FIELD_PREP(DEV10G_DEV_RST_CTRL_MAC_TX_RST, x)
+#define DEV10G_DEV_RST_CTRL_MAC_TX_RST_GET(x)\
+       FIELD_GET(DEV10G_DEV_RST_CTRL_MAC_TX_RST, x)
+
+#define DEV10G_DEV_RST_CTRL_MAC_RX_RST           BIT(0)
+#define DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(x)\
+       FIELD_PREP(DEV10G_DEV_RST_CTRL_MAC_RX_RST, x)
+#define DEV10G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
+       FIELD_GET(DEV10G_DEV_RST_CTRL_MAC_RX_RST, x)
+
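Each register macro expands __REG() with eleven positional arguments. Read against the values in this header they appear to be (target, target instance, target count, group base, group instance, group count, group width, register address, register instance, register count, register width); that interpretation is an inference from the pattern, not something this patch states. Under that assumption, a register's byte offset inside its target block would be:

	/* Hypothetical helper mirroring the apparent __REG() addressing:
	 * offset = gbase + ginst * gwidth + raddr + rinst * rwidth.
	 * Parameter meaning is inferred from the values in this header.
	 */
	static u32 reg_offset(u32 gbase, u32 ginst, u32 gwidth,
			      u32 raddr, u32 rinst, u32 rwidth)
	{
		return gbase + ginst * gwidth + raddr + rinst * rwidth;
	}

For example, DEV10G_DEV_RST_CTRL(t) above (gbase 436, raddr 0) would land at offset 436 in target instance t, consistent with the DEV5G layout later in this header, where the 60-byte MAC_CFG_STATUS, 312-byte 32-bit statistics and 64-byte 40-bit statistics groups sum to 436.
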
+/*      DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */
+#define DEV10G_PCS25G_CFG(t)      __REG(TARGET_DEV10G, t, 12, 488, 0, 1, 32, 0, 0, 1, 4)
+
+#define DEV10G_PCS25G_CFG_PCS25G_ENA             BIT(0)
+#define DEV10G_PCS25G_CFG_PCS25G_ENA_SET(x)\
+       FIELD_PREP(DEV10G_PCS25G_CFG_PCS25G_ENA, x)
+#define DEV10G_PCS25G_CFG_PCS25G_ENA_GET(x)\
+       FIELD_GET(DEV10G_PCS25G_CFG_PCS25G_ENA, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV25G_MAC_ENA_CFG(t)     __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 0, 0, 1, 4)
+
+#define DEV25G_MAC_ENA_CFG_RX_ENA                BIT(4)
+#define DEV25G_MAC_ENA_CFG_RX_ENA_SET(x)\
+       FIELD_PREP(DEV25G_MAC_ENA_CFG_RX_ENA, x)
+#define DEV25G_MAC_ENA_CFG_RX_ENA_GET(x)\
+       FIELD_GET(DEV25G_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV25G_MAC_ENA_CFG_TX_ENA                BIT(0)
+#define DEV25G_MAC_ENA_CFG_TX_ENA_SET(x)\
+       FIELD_PREP(DEV25G_MAC_ENA_CFG_TX_ENA, x)
+#define DEV25G_MAC_ENA_CFG_TX_ENA_GET(x)\
+       FIELD_GET(DEV25G_MAC_ENA_CFG_TX_ENA, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV25G_MAC_MAXLEN_CFG(t)  __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 8, 0, 1, 4)
+
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK    BIT(16)
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
+       FIELD_PREP(DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_GET(x)\
+       FIELD_GET(DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN            GENMASK(15, 0)
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+       FIELD_PREP(DEV25G_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+       FIELD_GET(DEV25G_MAC_MAXLEN_CFG_MAX_LEN, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV25G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 28, 0, 1, 4)
+
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA   BIT(24)
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_GET(x)\
+       FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA   BIT(20)
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_GET(x)\
+       FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA       BIT(16)
+#define DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_GET(x)\
+       FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS   BIT(12)
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_SET(x)\
+       FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_GET(x)\
+       FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA       BIT(8)
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_GET(x)\
+       FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA       BIT(4)
+#define DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_SET(x)\
+       FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_GET(x)\
+       FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+
+#define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA       BIT(0)
+#define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA_SET(x)\
+       FIELD_PREP(DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+#define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\
+       FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+
+/*      DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV25G_DEV_RST_CTRL(t)    __REG(TARGET_DEV25G, t, 8, 436, 0, 1, 52, 0, 0, 1, 4)
+
+#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA      BIT(28)
+#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
+       FIELD_PREP(DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA_GET(x)\
+       FIELD_GET(DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+
+#define DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(27)
+#define DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\
+       FIELD_PREP(DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+#define DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\
+       FIELD_GET(DEV25G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+
+#define DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS GENMASK(26, 25)
+#define DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_SET(x)\
+       FIELD_PREP(DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+#define DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_GET(x)\
+       FIELD_GET(DEV25G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+
+#define DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL     GENMASK(24, 23)
+#define DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL_SET(x)\
+       FIELD_PREP(DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+#define DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL_GET(x)\
+       FIELD_GET(DEV25G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+
+#define DEV25G_DEV_RST_CTRL_SPEED_SEL            GENMASK(22, 20)
+#define DEV25G_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+       FIELD_PREP(DEV25G_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEV25G_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+       FIELD_GET(DEV25G_DEV_RST_CTRL_SPEED_SEL, x)
+
+#define DEV25G_DEV_RST_CTRL_PCS_TX_RST           BIT(12)
+#define DEV25G_DEV_RST_CTRL_PCS_TX_RST_SET(x)\
+       FIELD_PREP(DEV25G_DEV_RST_CTRL_PCS_TX_RST, x)
+#define DEV25G_DEV_RST_CTRL_PCS_TX_RST_GET(x)\
+       FIELD_GET(DEV25G_DEV_RST_CTRL_PCS_TX_RST, x)
+
+#define DEV25G_DEV_RST_CTRL_PCS_RX_RST           BIT(8)
+#define DEV25G_DEV_RST_CTRL_PCS_RX_RST_SET(x)\
+       FIELD_PREP(DEV25G_DEV_RST_CTRL_PCS_RX_RST, x)
+#define DEV25G_DEV_RST_CTRL_PCS_RX_RST_GET(x)\
+       FIELD_GET(DEV25G_DEV_RST_CTRL_PCS_RX_RST, x)
+
+#define DEV25G_DEV_RST_CTRL_MAC_TX_RST           BIT(4)
+#define DEV25G_DEV_RST_CTRL_MAC_TX_RST_SET(x)\
+       FIELD_PREP(DEV25G_DEV_RST_CTRL_MAC_TX_RST, x)
+#define DEV25G_DEV_RST_CTRL_MAC_TX_RST_GET(x)\
+       FIELD_GET(DEV25G_DEV_RST_CTRL_MAC_TX_RST, x)
+
+#define DEV25G_DEV_RST_CTRL_MAC_RX_RST           BIT(0)
+#define DEV25G_DEV_RST_CTRL_MAC_RX_RST_SET(x)\
+       FIELD_PREP(DEV25G_DEV_RST_CTRL_MAC_RX_RST, x)
+#define DEV25G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
+       FIELD_GET(DEV25G_DEV_RST_CTRL_MAC_RX_RST, x)
+
+/*      DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */
+#define DEV25G_PCS25G_CFG(t)      __REG(TARGET_DEV25G, t, 8, 488, 0, 1, 32, 0, 0, 1, 4)
+
+#define DEV25G_PCS25G_CFG_PCS25G_ENA             BIT(0)
+#define DEV25G_PCS25G_CFG_PCS25G_ENA_SET(x)\
+       FIELD_PREP(DEV25G_PCS25G_CFG_PCS25G_ENA, x)
+#define DEV25G_PCS25G_CFG_PCS25G_ENA_GET(x)\
+       FIELD_GET(DEV25G_PCS25G_CFG_PCS25G_ENA, x)
+
+/*      DEV10G:PCS25G_CFG_STATUS:PCS25G_SD_CFG */
+#define DEV25G_PCS25G_SD_CFG(t)   __REG(TARGET_DEV25G, t, 8, 488, 0, 1, 32, 4, 0, 1, 4)
+
+#define DEV25G_PCS25G_SD_CFG_SD_SEL              BIT(8)
+#define DEV25G_PCS25G_SD_CFG_SD_SEL_SET(x)\
+       FIELD_PREP(DEV25G_PCS25G_SD_CFG_SD_SEL, x)
+#define DEV25G_PCS25G_SD_CFG_SD_SEL_GET(x)\
+       FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_SEL, x)
+
+#define DEV25G_PCS25G_SD_CFG_SD_POL              BIT(4)
+#define DEV25G_PCS25G_SD_CFG_SD_POL_SET(x)\
+       FIELD_PREP(DEV25G_PCS25G_SD_CFG_SD_POL, x)
+#define DEV25G_PCS25G_SD_CFG_SD_POL_GET(x)\
+       FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_POL, x)
+
+#define DEV25G_PCS25G_SD_CFG_SD_ENA              BIT(0)
+#define DEV25G_PCS25G_SD_CFG_SD_ENA_SET(x)\
+       FIELD_PREP(DEV25G_PCS25G_SD_CFG_SD_ENA, x)
+#define DEV25G_PCS25G_SD_CFG_SD_ENA_GET(x)\
+       FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_ENA, x)
+
+/*      DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV2G5_DEV_RST_CTRL(t)    __REG(TARGET_DEV2G5, t, 65, 0, 0, 1, 36, 0, 0, 1, 4)
+
+#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(23)
+#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\
+       FIELD_PREP(DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\
+       FIELD_GET(DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+
+#define DEV2G5_DEV_RST_CTRL_SPEED_SEL            GENMASK(22, 20)
+#define DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+       FIELD_PREP(DEV2G5_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEV2G5_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+       FIELD_GET(DEV2G5_DEV_RST_CTRL_SPEED_SEL, x)
+
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST       BIT(17)
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST_SET(x)\
+       FIELD_PREP(DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST_GET(x)\
+       FIELD_GET(DEV2G5_DEV_RST_CTRL_USX_PCS_TX_RST, x)
+
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST       BIT(16)
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST_SET(x)\
+       FIELD_PREP(DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST_GET(x)\
+       FIELD_GET(DEV2G5_DEV_RST_CTRL_USX_PCS_RX_RST, x)
+
+#define DEV2G5_DEV_RST_CTRL_PCS_TX_RST           BIT(12)
+#define DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(x)\
+       FIELD_PREP(DEV2G5_DEV_RST_CTRL_PCS_TX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_PCS_TX_RST_GET(x)\
+       FIELD_GET(DEV2G5_DEV_RST_CTRL_PCS_TX_RST, x)
+
+#define DEV2G5_DEV_RST_CTRL_PCS_RX_RST           BIT(8)
+#define DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(x)\
+       FIELD_PREP(DEV2G5_DEV_RST_CTRL_PCS_RX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_PCS_RX_RST_GET(x)\
+       FIELD_GET(DEV2G5_DEV_RST_CTRL_PCS_RX_RST, x)
+
+#define DEV2G5_DEV_RST_CTRL_MAC_TX_RST           BIT(4)
+#define DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(x)\
+       FIELD_PREP(DEV2G5_DEV_RST_CTRL_MAC_TX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_MAC_TX_RST_GET(x)\
+       FIELD_GET(DEV2G5_DEV_RST_CTRL_MAC_TX_RST, x)
+
+#define DEV2G5_DEV_RST_CTRL_MAC_RX_RST           BIT(0)
+#define DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(x)\
+       FIELD_PREP(DEV2G5_DEV_RST_CTRL_MAC_RX_RST, x)
+#define DEV2G5_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
+       FIELD_GET(DEV2G5_DEV_RST_CTRL_MAC_RX_RST, x)
+
+/*      DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV2G5_MAC_ENA_CFG(t)     __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 0, 0, 1, 4)
+
+#define DEV2G5_MAC_ENA_CFG_RX_ENA                BIT(4)
+#define DEV2G5_MAC_ENA_CFG_RX_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_ENA_CFG_RX_ENA, x)
+#define DEV2G5_MAC_ENA_CFG_RX_ENA_GET(x)\
+       FIELD_GET(DEV2G5_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV2G5_MAC_ENA_CFG_TX_ENA                BIT(0)
+#define DEV2G5_MAC_ENA_CFG_TX_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_ENA_CFG_TX_ENA, x)
+#define DEV2G5_MAC_ENA_CFG_TX_ENA_GET(x)\
+       FIELD_GET(DEV2G5_MAC_ENA_CFG_TX_ENA, x)
+
+/*      DEV1G:MAC_CFG_STATUS:MAC_MODE_CFG */
+#define DEV2G5_MAC_MODE_CFG(t)    __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 4, 0, 1, 4)
+
+#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA     BIT(8)
+#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA, x)
+#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA_GET(x)\
+       FIELD_GET(DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA, x)
+
+#define DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA        BIT(4)
+#define DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA, x)
+#define DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_GET(x)\
+       FIELD_GET(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA, x)
+
+#define DEV2G5_MAC_MODE_CFG_FDX_ENA              BIT(0)
+#define DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_MODE_CFG_FDX_ENA, x)
+#define DEV2G5_MAC_MODE_CFG_FDX_ENA_GET(x)\
+       FIELD_GET(DEV2G5_MAC_MODE_CFG_FDX_ENA, x)
+
+/*      DEV1G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV2G5_MAC_MAXLEN_CFG(t)  __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 8, 0, 1, 4)
+
+#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN            GENMASK(15, 0)
+#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+       FIELD_GET(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, x)
+
+/*      DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEV2G5_MAC_TAGS_CFG(t)    __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 12, 0, 1, 4)
+
+#define DEV2G5_MAC_TAGS_CFG_TAG_ID               GENMASK(31, 16)
+#define DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_TAGS_CFG_TAG_ID, x)
+#define DEV2G5_MAC_TAGS_CFG_TAG_ID_GET(x)\
+       FIELD_GET(DEV2G5_MAC_TAGS_CFG_TAG_ID, x)
+
+#define DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA     BIT(3)
+#define DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x)
+#define DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_GET(x)\
+       FIELD_GET(DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x)
+
+#define DEV2G5_MAC_TAGS_CFG_PB_ENA               GENMASK(2, 1)
+#define DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_TAGS_CFG_PB_ENA, x)
+#define DEV2G5_MAC_TAGS_CFG_PB_ENA_GET(x)\
+       FIELD_GET(DEV2G5_MAC_TAGS_CFG_PB_ENA, x)
+
+#define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA         BIT(0)
+#define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+#define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\
+       FIELD_GET(DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+
+/*      DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG2 */
+#define DEV2G5_MAC_TAGS_CFG2(t)   __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 16, 0, 1, 4)
+
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3             GENMASK(31, 16)
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_TAGS_CFG2_TAG_ID3, x)
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3_GET(x)\
+       FIELD_GET(DEV2G5_MAC_TAGS_CFG2_TAG_ID3, x)
+
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID2             GENMASK(15, 0)
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID2_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_TAGS_CFG2_TAG_ID2, x)
+#define DEV2G5_MAC_TAGS_CFG2_TAG_ID2_GET(x)\
+       FIELD_GET(DEV2G5_MAC_TAGS_CFG2_TAG_ID2, x)
+
+/*      DEV1G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV2G5_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 20, 0, 1, 4)
+
+#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA      BIT(0)
+#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA, x)
+#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA_GET(x)\
+       FIELD_GET(DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA, x)
+
+/*      DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */
+#define DEV2G5_MAC_IFG_CFG(t)     __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 24, 0, 1, 4)
+
+#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17)
+#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK, x)
+#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK_GET(x)\
+       FIELD_GET(DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK, x)
+
+#define DEV2G5_MAC_IFG_CFG_TX_IFG                GENMASK(12, 8)
+#define DEV2G5_MAC_IFG_CFG_TX_IFG_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_IFG_CFG_TX_IFG, x)
+#define DEV2G5_MAC_IFG_CFG_TX_IFG_GET(x)\
+       FIELD_GET(DEV2G5_MAC_IFG_CFG_TX_IFG, x)
+
+#define DEV2G5_MAC_IFG_CFG_RX_IFG2               GENMASK(7, 4)
+#define DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_IFG_CFG_RX_IFG2, x)
+#define DEV2G5_MAC_IFG_CFG_RX_IFG2_GET(x)\
+       FIELD_GET(DEV2G5_MAC_IFG_CFG_RX_IFG2, x)
+
+#define DEV2G5_MAC_IFG_CFG_RX_IFG1               GENMASK(3, 0)
+#define DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_IFG_CFG_RX_IFG1, x)
+#define DEV2G5_MAC_IFG_CFG_RX_IFG1_GET(x)\
+       FIELD_GET(DEV2G5_MAC_IFG_CFG_RX_IFG1, x)
+
+/*      DEV1G:MAC_CFG_STATUS:MAC_HDX_CFG */
+#define DEV2G5_MAC_HDX_CFG(t)     __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 28, 0, 1, 4)
+
+#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC       BIT(26)
+#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC, x)
+#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC_GET(x)\
+       FIELD_GET(DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC, x)
+
+#define DEV2G5_MAC_HDX_CFG_SEED                  GENMASK(23, 16)
+#define DEV2G5_MAC_HDX_CFG_SEED_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_HDX_CFG_SEED, x)
+#define DEV2G5_MAC_HDX_CFG_SEED_GET(x)\
+       FIELD_GET(DEV2G5_MAC_HDX_CFG_SEED, x)
+
+#define DEV2G5_MAC_HDX_CFG_SEED_LOAD             BIT(12)
+#define DEV2G5_MAC_HDX_CFG_SEED_LOAD_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_HDX_CFG_SEED_LOAD, x)
+#define DEV2G5_MAC_HDX_CFG_SEED_LOAD_GET(x)\
+       FIELD_GET(DEV2G5_MAC_HDX_CFG_SEED_LOAD, x)
+
+#define DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8)
+#define DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA, x)
+#define DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA_GET(x)\
+       FIELD_GET(DEV2G5_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA, x)
+
+#define DEV2G5_MAC_HDX_CFG_LATE_COL_POS          GENMASK(6, 0)
+#define DEV2G5_MAC_HDX_CFG_LATE_COL_POS_SET(x)\
+       FIELD_PREP(DEV2G5_MAC_HDX_CFG_LATE_COL_POS, x)
+#define DEV2G5_MAC_HDX_CFG_LATE_COL_POS_GET(x)\
+       FIELD_GET(DEV2G5_MAC_HDX_CFG_LATE_COL_POS, x)
+
+/*      DEV1G:PCS1G_CFG_STATUS:PCS1G_CFG */
+#define DEV2G5_PCS1G_CFG(t)       __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 0, 0, 1, 4)
+
+#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE        BIT(4)
+#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE, x)
+#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE, x)
+
+#define DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA        BIT(1)
+#define DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA, x)
+#define DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_CFG_AN_LINK_CTRL_ENA, x)
+
+#define DEV2G5_PCS1G_CFG_PCS_ENA                 BIT(0)
+#define DEV2G5_PCS1G_CFG_PCS_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_CFG_PCS_ENA, x)
+#define DEV2G5_PCS1G_CFG_PCS_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_CFG_PCS_ENA, x)
+
+/*      DEV1G:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */
+#define DEV2G5_PCS1G_MODE_CFG(t)  __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 4, 0, 1, 4)
+
+#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA    BIT(4)
+#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA, x)
+#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA, x)
+
+#define DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA  BIT(1)
+#define DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x)
+#define DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x)
+
+#define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA     BIT(0)
+#define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+#define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+
+/*      DEV1G:PCS1G_CFG_STATUS:PCS1G_SD_CFG */
+#define DEV2G5_PCS1G_SD_CFG(t)    __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 8, 0, 1, 4)
+
+#define DEV2G5_PCS1G_SD_CFG_SD_SEL               BIT(8)
+#define DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_SD_CFG_SD_SEL, x)
+#define DEV2G5_PCS1G_SD_CFG_SD_SEL_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_SEL, x)
+
+#define DEV2G5_PCS1G_SD_CFG_SD_POL               BIT(4)
+#define DEV2G5_PCS1G_SD_CFG_SD_POL_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_SD_CFG_SD_POL, x)
+#define DEV2G5_PCS1G_SD_CFG_SD_POL_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_POL, x)
+
+#define DEV2G5_PCS1G_SD_CFG_SD_ENA               BIT(0)
+#define DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_SD_CFG_SD_ENA, x)
+#define DEV2G5_PCS1G_SD_CFG_SD_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_ENA, x)
+
+/*      DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */
+#define DEV2G5_PCS1G_ANEG_CFG(t)  __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 12, 0, 1, 4)
+
+#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY        GENMASK(31, 16)
+#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY, x)
+#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY, x)
+
+#define DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA     BIT(8)
+#define DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x)
+#define DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x)
+
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT BIT(1)
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT, x)
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT, x)
+
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA           BIT(0)
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA, x)
+#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA, x)
+
+/*      DEV1G:PCS1G_CFG_STATUS:PCS1G_LB_CFG */
+#define DEV2G5_PCS1G_LB_CFG(t)    __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 20, 0, 1, 4)
+
+#define DEV2G5_PCS1G_LB_CFG_RA_ENA               BIT(4)
+#define DEV2G5_PCS1G_LB_CFG_RA_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_LB_CFG_RA_ENA, x)
+#define DEV2G5_PCS1G_LB_CFG_RA_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_LB_CFG_RA_ENA, x)
+
+#define DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA      BIT(1)
+#define DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA, x)
+#define DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_LB_CFG_GMII_PHY_LB_ENA, x)
+
+#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA      BIT(0)
+#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA, x)
+#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA, x)
+
+/*      DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */
+#define DEV2G5_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 32, 0, 1, 4)
+
+#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY  GENMASK(31, 16)
+#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY, x)
+#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY, x)
+
+#define DEV2G5_PCS1G_ANEG_STATUS_PR              BIT(4)
+#define DEV2G5_PCS1G_ANEG_STATUS_PR_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_PR, x)
+#define DEV2G5_PCS1G_ANEG_STATUS_PR_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_PR, x)
+
+#define DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY  BIT(3)
+#define DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY, x)
+#define DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_PAGE_RX_STICKY, x)
+
+#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE   BIT(0)
+#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
+#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
+
+/*      DEV1G:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */
+#define DEV2G5_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 40, 0, 1, 4)
+
+#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR       GENMASK(15, 12)
+#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR, x)
+#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR, x)
+
+#define DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT   BIT(8)
+#define DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT, x)
+#define DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_SIGNAL_DETECT, x)
+
+#define DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS     BIT(4)
+#define DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS, x)
+#define DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS, x)
+
+#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS     BIT(0)
+#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS, x)
+#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS, x)
+
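The _GET() side of the pattern decodes a value read back from the hardware. As a sketch, polling the 1G PCS for link (again assuming an already-mapped register pointer; the helper itself is not part of this patch):

	/* True when the PCS reports both bit-sync and link up. */
	static bool pcs1g_link_up(void __iomem *link_status)
	{
		u32 val = readl(link_status);

		return DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(val) &&
		       DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(val);
	}
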
+/*      DEV1G:PCS1G_CFG_STATUS:PCS1G_STICKY */
+#define DEV2G5_PCS1G_STICKY(t)    __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 48, 0, 1, 4)
+
+#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY     BIT(4)
+#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+
+#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY   BIT(0)
+#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY_SET(x)\
+       FIELD_PREP(DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY, x)
+#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY_GET(x)\
+       FIELD_GET(DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY, x)
+
+/*      DEV1G:PCS_FX100_CONFIGURATION:PCS_FX100_CFG */
+#define DEV2G5_PCS_FX100_CFG(t)   __REG(TARGET_DEV2G5, t, 65, 164, 0, 1, 4, 0, 0, 1, 4)
+
+#define DEV2G5_PCS_FX100_CFG_SD_SEL              BIT(26)
+#define DEV2G5_PCS_FX100_CFG_SD_SEL_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_SD_SEL, x)
+#define DEV2G5_PCS_FX100_CFG_SD_SEL_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_SD_SEL, x)
+
+#define DEV2G5_PCS_FX100_CFG_SD_POL              BIT(25)
+#define DEV2G5_PCS_FX100_CFG_SD_POL_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_SD_POL, x)
+#define DEV2G5_PCS_FX100_CFG_SD_POL_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_SD_POL, x)
+
+#define DEV2G5_PCS_FX100_CFG_SD_ENA              BIT(24)
+#define DEV2G5_PCS_FX100_CFG_SD_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_SD_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_SD_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_SD_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA        BIT(20)
+#define DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_LOOPBACK_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA        BIT(16)
+#define DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_SWAP_MII_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_RXBITSEL            GENMASK(15, 12)
+#define DEV2G5_PCS_FX100_CFG_RXBITSEL_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_RXBITSEL, x)
+#define DEV2G5_PCS_FX100_CFG_RXBITSEL_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_RXBITSEL, x)
+
+#define DEV2G5_PCS_FX100_CFG_SIGDET_CFG          GENMASK(10, 9)
+#define DEV2G5_PCS_FX100_CFG_SIGDET_CFG_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_SIGDET_CFG, x)
+#define DEV2G5_PCS_FX100_CFG_SIGDET_CFG_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_SIGDET_CFG, x)
+
+#define DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA     BIT(8)
+#define DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_LINKHYST_TM_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER       GENMASK(7, 4)
+#define DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER, x)
+#define DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_LINKHYSTTIMER, x)
+
+#define DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA     BIT(3)
+#define DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_UNIDIR_MODE_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_FEFCHK_ENA          BIT(2)
+#define DEV2G5_PCS_FX100_CFG_FEFCHK_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_FEFCHK_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_FEFCHK_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_FEFCHK_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_FEFGEN_ENA          BIT(1)
+#define DEV2G5_PCS_FX100_CFG_FEFGEN_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_FEFGEN_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_FEFGEN_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_FEFGEN_ENA, x)
+
+#define DEV2G5_PCS_FX100_CFG_PCS_ENA             BIT(0)
+#define DEV2G5_PCS_FX100_CFG_PCS_ENA_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_CFG_PCS_ENA, x)
+#define DEV2G5_PCS_FX100_CFG_PCS_ENA_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_CFG_PCS_ENA, x)
+
+/*      DEV1G:PCS_FX100_STATUS:PCS_FX100_STATUS */
+#define DEV2G5_PCS_FX100_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 168, 0, 1, 4, 0, 0, 1, 4)
+
+#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP     GENMASK(11, 8)
+#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP, x)
+#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP, x)
+
+#define DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY BIT(7)
+#define DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY, x)
+#define DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_STATUS_PCS_ERROR_STICKY, x)
+
+#define DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY BIT(6)
+#define DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY, x)
+#define DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_STATUS_FEF_FOUND_STICKY, x)
+
+#define DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY BIT(5)
+#define DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY, x)
+#define DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_STATUS_SSD_ERROR_STICKY, x)
+
+#define DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY BIT(4)
+#define DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY, x)
+#define DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_STATUS_SYNC_LOST_STICKY, x)
+
+#define DEV2G5_PCS_FX100_STATUS_FEF_STATUS       BIT(2)
+#define DEV2G5_PCS_FX100_STATUS_FEF_STATUS_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_STATUS_FEF_STATUS, x)
+#define DEV2G5_PCS_FX100_STATUS_FEF_STATUS_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_STATUS_FEF_STATUS, x)
+
+#define DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT    BIT(1)
+#define DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT, x)
+#define DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_STATUS_SIGNAL_DETECT, x)
+
+#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS      BIT(0)
+#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS_SET(x)\
+       FIELD_PREP(DEV2G5_PCS_FX100_STATUS_SYNC_STATUS, x)
+#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS_GET(x)\
+       FIELD_GET(DEV2G5_PCS_FX100_STATUS_SYNC_STATUS, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV5G_MAC_ENA_CFG(t)      __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 0, 0, 1, 4)
+
+#define DEV5G_MAC_ENA_CFG_RX_ENA                 BIT(4)
+#define DEV5G_MAC_ENA_CFG_RX_ENA_SET(x)\
+       FIELD_PREP(DEV5G_MAC_ENA_CFG_RX_ENA, x)
+#define DEV5G_MAC_ENA_CFG_RX_ENA_GET(x)\
+       FIELD_GET(DEV5G_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV5G_MAC_ENA_CFG_TX_ENA                 BIT(0)
+#define DEV5G_MAC_ENA_CFG_TX_ENA_SET(x)\
+       FIELD_PREP(DEV5G_MAC_ENA_CFG_TX_ENA, x)
+#define DEV5G_MAC_ENA_CFG_TX_ENA_GET(x)\
+       FIELD_GET(DEV5G_MAC_ENA_CFG_TX_ENA, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV5G_MAC_MAXLEN_CFG(t)   __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 8, 0, 1, 4)
+
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK     BIT(16)
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
+       FIELD_PREP(DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_GET(x)\
+       FIELD_GET(DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK, x)
+
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN             GENMASK(15, 0)
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+       FIELD_PREP(DEV5G_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+       FIELD_GET(DEV5G_MAC_MAXLEN_CFG_MAX_LEN, x)
+
+/*      DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV5G_MAC_ADV_CHK_CFG(t)  __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 28, 0, 1, 4)
+
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA    BIT(24)
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_GET(x)\
+       FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA    BIT(20)
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA_GET(x)\
+       FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_EXT_SOP_CHK_ENA, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA        BIT(16)
+#define DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA_GET(x)\
+       FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_SFD_CHK_ENA, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS    BIT(12)
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_SET(x)\
+       FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS_GET(x)\
+       FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_PRM_SHK_CHK_DIS, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA        BIT(8)
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_SET(x)\
+       FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA_GET(x)\
+       FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_PRM_CHK_ENA, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA        BIT(4)
+#define DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_SET(x)\
+       FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA_GET(x)\
+       FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_OOR_ERR_ENA, x)
+
+#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA        BIT(0)
+#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA_SET(x)\
+       FIELD_PREP(DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\
+       FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_SYMBOL_ERR_CNT */
+#define DEV5G_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 0, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_PAUSE_CNT */
+#define DEV5G_RX_PAUSE_CNT(t)     __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 4, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_UNSUP_OPCODE_CNT */
+#define DEV5G_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 8, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_UC_CNT */
+#define DEV5G_RX_UC_CNT(t)        __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 12, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_MC_CNT */
+#define DEV5G_RX_MC_CNT(t)        __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 16, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_BC_CNT */
+#define DEV5G_RX_BC_CNT(t)        __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 20, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_CRC_ERR_CNT */
+#define DEV5G_RX_CRC_ERR_CNT(t)   __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 24, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_UNDERSIZE_CNT */
+#define DEV5G_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 28, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_FRAGMENTS_CNT */
+#define DEV5G_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 32, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_IN_RANGE_LEN_ERR_CNT */
+#define DEV5G_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 36, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 40, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_OVERSIZE_CNT */
+#define DEV5G_RX_OVERSIZE_CNT(t)  __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 44, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_JABBERS_CNT */
+#define DEV5G_RX_JABBERS_CNT(t)   __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 48, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_SIZE64_CNT */
+#define DEV5G_RX_SIZE64_CNT(t)    __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 52, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_SIZE65TO127_CNT */
+#define DEV5G_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 56, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_SIZE128TO255_CNT */
+#define DEV5G_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 60, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_SIZE256TO511_CNT */
+#define DEV5G_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 64, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_SIZE512TO1023_CNT */
+#define DEV5G_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 68, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1024TO1518_CNT */
+#define DEV5G_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 72, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1519TOMAX_CNT */
+#define DEV5G_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 76, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_IPG_SHRINK_CNT */
+#define DEV5G_RX_IPG_SHRINK_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 80, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_PAUSE_CNT */
+#define DEV5G_TX_PAUSE_CNT(t)     __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 84, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_UC_CNT */
+#define DEV5G_TX_UC_CNT(t)        __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 88, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_MC_CNT */
+#define DEV5G_TX_MC_CNT(t)        __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 92, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_BC_CNT */
+#define DEV5G_TX_BC_CNT(t)        __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 96, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_SIZE64_CNT */
+#define DEV5G_TX_SIZE64_CNT(t)    __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 100, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_SIZE65TO127_CNT */
+#define DEV5G_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 104, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_SIZE128TO255_CNT */
+#define DEV5G_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 108, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_SIZE256TO511_CNT */
+#define DEV5G_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 112, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_SIZE512TO1023_CNT */
+#define DEV5G_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 116, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1024TO1518_CNT */
+#define DEV5G_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 120, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1519TOMAX_CNT */
+#define DEV5G_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 124, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_ALIGNMENT_LOST_CNT */
+#define DEV5G_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 128, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_TAGGED_FRMS_CNT */
+#define DEV5G_RX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 132, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_UNTAGGED_FRMS_CNT */
+#define DEV5G_RX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 136, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_TAGGED_FRMS_CNT */
+#define DEV5G_TX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 140, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:TX_UNTAGGED_FRMS_CNT */
+#define DEV5G_TX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 144, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SYMBOL_ERR_CNT */
+#define DEV5G_PMAC_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 148, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_PAUSE_CNT */
+#define DEV5G_PMAC_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 152, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNSUP_OPCODE_CNT */
+#define DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 156, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UC_CNT */
+#define DEV5G_PMAC_RX_UC_CNT(t)   __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 160, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_MC_CNT */
+#define DEV5G_PMAC_RX_MC_CNT(t)   __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 164, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_BC_CNT */
+#define DEV5G_PMAC_RX_BC_CNT(t)   __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 168, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_CRC_ERR_CNT */
+#define DEV5G_PMAC_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 172, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNDERSIZE_CNT */
+#define DEV5G_PMAC_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 176, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_FRAGMENTS_CNT */
+#define DEV5G_PMAC_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 180, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
+#define DEV5G_PMAC_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
+                                       t, 13, 60, 0, 1, 312, 184, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define DEV5G_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
+                                       t, 13, 60, 0, 1, 312, 188, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OVERSIZE_CNT */
+#define DEV5G_PMAC_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 192, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_JABBERS_CNT */
+#define DEV5G_PMAC_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 196, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE64_CNT */
+#define DEV5G_PMAC_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 200, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE65TO127_CNT */
+#define DEV5G_PMAC_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 204, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE128TO255_CNT */
+#define DEV5G_PMAC_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 208, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE256TO511_CNT */
+#define DEV5G_PMAC_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 212, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE512TO1023_CNT */
+#define DEV5G_PMAC_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 216, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1024TO1518_CNT */
+#define DEV5G_PMAC_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 220, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1519TOMAX_CNT */
+#define DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 224, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_PAUSE_CNT */
+#define DEV5G_PMAC_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 228, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_UC_CNT */
+#define DEV5G_PMAC_TX_UC_CNT(t)   __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 232, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_MC_CNT */
+#define DEV5G_PMAC_TX_MC_CNT(t)   __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 236, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_BC_CNT */
+#define DEV5G_PMAC_TX_BC_CNT(t)   __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 240, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE64_CNT */
+#define DEV5G_PMAC_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 244, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE65TO127_CNT */
+#define DEV5G_PMAC_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 248, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE128TO255_CNT */
+#define DEV5G_PMAC_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 252, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE256TO511_CNT */
+#define DEV5G_PMAC_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 256, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE512TO1023_CNT */
+#define DEV5G_PMAC_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 260, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1024TO1518_CNT */
+#define DEV5G_PMAC_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 264, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1519TOMAX_CNT */
+#define DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 268, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_ALIGNMENT_LOST_CNT */
+#define DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 272, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_ERR_CNT */
+#define DEV5G_MM_RX_ASSEMBLY_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 276, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:MM_RX_SMD_ERR_CNT */
+#define DEV5G_MM_RX_SMD_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 280, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_OK_CNT */
+#define DEV5G_MM_RX_ASSEMBLY_OK_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 284, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:MM_RX_MERGE_FRAG_CNT */
+#define DEV5G_MM_RX_MERGE_FRAG_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 288, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:MM_TX_PFRAGMENT_CNT */
+#define DEV5G_MM_TX_PFRAGMENT_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 292, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_HIH_CKSM_ERR_CNT */
+#define DEV5G_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 296, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:RX_XGMII_PROT_ERR_CNT */
+#define DEV5G_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 300, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_HIH_CKSM_ERR_CNT */
+#define DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 304, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_XGMII_PROT_ERR_CNT */
+#define DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 308, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_CNT */
+#define DEV5G_RX_IN_BYTES_CNT(t)  __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 0, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_MSB_CNT */
+#define DEV5G_RX_IN_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 4, 0, 1, 4)
+
+#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
+#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
+
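/* A minimal usage sketch (assumed helpers, not part of this patch): the
 * 40-bit statistics are split into a 32-bit LSB register and an 8-bit
 * MSB register, so a reader combines the two. spx5_rd() is assumed to be
 * the readl()-based accessor from sparx5_main.h; a production reader
 * would also guard against the LSB wrapping between the two reads.
 */
static inline u64 sparx5_dev5g_rx_in_bytes(struct sparx5 *sparx5, int tinst)
{
	u64 lsb = spx5_rd(sparx5, DEV5G_RX_IN_BYTES_CNT(tinst));
	u64 msb = DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(
			spx5_rd(sparx5, DEV5G_RX_IN_BYTES_MSB_CNT(tinst)));

	return (msb << 32) | lsb;
}
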
+/*      DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_CNT */
+#define DEV5G_RX_OK_BYTES_CNT(t)  __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 8, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_MSB_CNT */
+#define DEV5G_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 12, 0, 1, 4)
+
+#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
+#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_CNT */
+#define DEV5G_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 16, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_MSB_CNT */
+#define DEV5G_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 20, 0, 1, 4)
+
+#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
+#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_CNT */
+#define DEV5G_TX_OUT_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 24, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_MSB_CNT */
+#define DEV5G_TX_OUT_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 28, 0, 1, 4)
+
+#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
+#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_CNT */
+#define DEV5G_TX_OK_BYTES_CNT(t)  __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 32, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_MSB_CNT */
+#define DEV5G_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 36, 0, 1, 4)
+
+#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
+#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_CNT */
+#define DEV5G_PMAC_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 40, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_MSB_CNT */
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 44, 0, 1, 4)
+
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_CNT */
+#define DEV5G_PMAC_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 48, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_MSB_CNT */
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 52, 0, 1, 4)
+
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_CNT */
+#define DEV5G_PMAC_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 56, 0, 1, 4)
+
+/*      DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_MSB_CNT */
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 60, 0, 1, 4)
+
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(7, 0)
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\
+       FIELD_PREP(DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_GET(x)\
+       FIELD_GET(DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
+
+/*      DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV5G_DEV_RST_CTRL(t)     __REG(TARGET_DEV5G, t, 13, 436, 0, 1, 52, 0, 0, 1, 4)
+
+#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA       BIT(28)
+#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
+       FIELD_PREP(DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA_GET(x)\
+       FIELD_GET(DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA, x)
+
+#define DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(27)
+#define DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\
+       FIELD_PREP(DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+#define DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_GET(x)\
+       FIELD_GET(DEV5G_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS, x)
+
+#define DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS GENMASK(26, 25)
+#define DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_SET(x)\
+       FIELD_PREP(DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+#define DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS_GET(x)\
+       FIELD_GET(DEV5G_DEV_RST_CTRL_MUXED_USXGMII_NETWORK_PORTS, x)
+
+#define DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL      GENMASK(24, 23)
+#define DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL_SET(x)\
+       FIELD_PREP(DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+#define DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL_GET(x)\
+       FIELD_GET(DEV5G_DEV_RST_CTRL_SERDES_SPEED_SEL, x)
+
+#define DEV5G_DEV_RST_CTRL_SPEED_SEL             GENMASK(22, 20)
+#define DEV5G_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+       FIELD_PREP(DEV5G_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEV5G_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+       FIELD_GET(DEV5G_DEV_RST_CTRL_SPEED_SEL, x)
+
+#define DEV5G_DEV_RST_CTRL_PCS_TX_RST            BIT(12)
+#define DEV5G_DEV_RST_CTRL_PCS_TX_RST_SET(x)\
+       FIELD_PREP(DEV5G_DEV_RST_CTRL_PCS_TX_RST, x)
+#define DEV5G_DEV_RST_CTRL_PCS_TX_RST_GET(x)\
+       FIELD_GET(DEV5G_DEV_RST_CTRL_PCS_TX_RST, x)
+
+#define DEV5G_DEV_RST_CTRL_PCS_RX_RST            BIT(8)
+#define DEV5G_DEV_RST_CTRL_PCS_RX_RST_SET(x)\
+       FIELD_PREP(DEV5G_DEV_RST_CTRL_PCS_RX_RST, x)
+#define DEV5G_DEV_RST_CTRL_PCS_RX_RST_GET(x)\
+       FIELD_GET(DEV5G_DEV_RST_CTRL_PCS_RX_RST, x)
+
+#define DEV5G_DEV_RST_CTRL_MAC_TX_RST            BIT(4)
+#define DEV5G_DEV_RST_CTRL_MAC_TX_RST_SET(x)\
+       FIELD_PREP(DEV5G_DEV_RST_CTRL_MAC_TX_RST, x)
+#define DEV5G_DEV_RST_CTRL_MAC_TX_RST_GET(x)\
+       FIELD_GET(DEV5G_DEV_RST_CTRL_MAC_TX_RST, x)
+
+#define DEV5G_DEV_RST_CTRL_MAC_RX_RST            BIT(0)
+#define DEV5G_DEV_RST_CTRL_MAC_RX_RST_SET(x)\
+       FIELD_PREP(DEV5G_DEV_RST_CTRL_MAC_RX_RST, x)
+#define DEV5G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
+       FIELD_GET(DEV5G_DEV_RST_CTRL_MAC_RX_RST, x)
+
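/* A sketch of driving these fields, assuming the spx5_rmw(val, mask, priv,
 * reg) read-modify-write helper from sparx5_main.h (only the macros above
 * are part of this patch): taking the 5G device MAC and PCS out of reset
 * by clearing the four reset bits while leaving the mode fields untouched.
 */
static inline void sparx5_dev5g_release_reset(struct sparx5 *sparx5, int tinst)
{
	spx5_rmw(DEV5G_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
		 DEV5G_DEV_RST_CTRL_PCS_RX_RST_SET(0) |
		 DEV5G_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
		 DEV5G_DEV_RST_CTRL_MAC_RX_RST_SET(0),
		 DEV5G_DEV_RST_CTRL_PCS_TX_RST |
		 DEV5G_DEV_RST_CTRL_PCS_RX_RST |
		 DEV5G_DEV_RST_CTRL_MAC_TX_RST |
		 DEV5G_DEV_RST_CTRL_MAC_RX_RST,
		 sparx5, DEV5G_DEV_RST_CTRL(tinst));
}
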
+/*      DSM:RAM_CTRL:RAM_INIT */
+#define DSM_RAM_INIT              __REG(TARGET_DSM, 0, 1, 0, 0, 1, 4, 0, 0, 1, 4)
+
+#define DSM_RAM_INIT_RAM_INIT                    BIT(1)
+#define DSM_RAM_INIT_RAM_INIT_SET(x)\
+       FIELD_PREP(DSM_RAM_INIT_RAM_INIT, x)
+#define DSM_RAM_INIT_RAM_INIT_GET(x)\
+       FIELD_GET(DSM_RAM_INIT_RAM_INIT, x)
+
+#define DSM_RAM_INIT_RAM_CFG_HOOK                BIT(0)
+#define DSM_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+       FIELD_PREP(DSM_RAM_INIT_RAM_CFG_HOOK, x)
+#define DSM_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+       FIELD_GET(DSM_RAM_INIT_RAM_CFG_HOOK, x)
+
+/*      DSM:CFG:BUF_CFG */
+#define DSM_BUF_CFG(r)            __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 0, r, 67, 4)
+
+#define DSM_BUF_CFG_CSC_STAT_DIS                 BIT(13)
+#define DSM_BUF_CFG_CSC_STAT_DIS_SET(x)\
+       FIELD_PREP(DSM_BUF_CFG_CSC_STAT_DIS, x)
+#define DSM_BUF_CFG_CSC_STAT_DIS_GET(x)\
+       FIELD_GET(DSM_BUF_CFG_CSC_STAT_DIS, x)
+
+#define DSM_BUF_CFG_AGING_ENA                    BIT(12)
+#define DSM_BUF_CFG_AGING_ENA_SET(x)\
+       FIELD_PREP(DSM_BUF_CFG_AGING_ENA, x)
+#define DSM_BUF_CFG_AGING_ENA_GET(x)\
+       FIELD_GET(DSM_BUF_CFG_AGING_ENA, x)
+
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS       BIT(11)
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(x)\
+       FIELD_PREP(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS, x)
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_GET(x)\
+       FIELD_GET(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS, x)
+
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT   GENMASK(10, 0)
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT_SET(x)\
+       FIELD_PREP(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT, x)
+#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT_GET(x)\
+       FIELD_GET(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT, x)
+
+/*      DSM:CFG:DEV_TX_STOP_WM_CFG */
+#define DSM_DEV_TX_STOP_WM_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 1360, r, 67, 4)
+
+#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA  BIT(9)
+#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA_SET(x)\
+       FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA, x)
+#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA_GET(x)\
+       FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA, x)
+
+#define DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA BIT(8)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(x)\
+       FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA, x)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_GET(x)\
+       FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA, x)
+
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM    GENMASK(7, 1)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(x)\
+       FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM, x)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_GET(x)\
+       FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM, x)
+
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR    BIT(0)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(x)\
+       FIELD_PREP(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, x)
+#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_GET(x)\
+       FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, x)
+
+/*      DSM:CFG:RX_PAUSE_CFG */
+#define DSM_RX_PAUSE_CFG(r)       __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 1628, r, 67, 4)
+
+#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN             BIT(1)
+#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(x)\
+       FIELD_PREP(DSM_RX_PAUSE_CFG_RX_PAUSE_EN, x)
+#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN_GET(x)\
+       FIELD_GET(DSM_RX_PAUSE_CFG_RX_PAUSE_EN, x)
+
+#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL           BIT(0)
+#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL_SET(x)\
+       FIELD_PREP(DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL, x)
+#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL_GET(x)\
+       FIELD_GET(DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL, x)
+
+/*      DSM:CFG:MAC_CFG */
+#define DSM_MAC_CFG(r)            __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2432, r, 67, 4)
+
+#define DSM_MAC_CFG_TX_PAUSE_VAL                 GENMASK(31, 16)
+#define DSM_MAC_CFG_TX_PAUSE_VAL_SET(x)\
+       FIELD_PREP(DSM_MAC_CFG_TX_PAUSE_VAL, x)
+#define DSM_MAC_CFG_TX_PAUSE_VAL_GET(x)\
+       FIELD_GET(DSM_MAC_CFG_TX_PAUSE_VAL, x)
+
+#define DSM_MAC_CFG_HDX_BACKPREASSURE            BIT(2)
+#define DSM_MAC_CFG_HDX_BACKPREASSURE_SET(x)\
+       FIELD_PREP(DSM_MAC_CFG_HDX_BACKPREASSURE, x)
+#define DSM_MAC_CFG_HDX_BACKPREASSURE_GET(x)\
+       FIELD_GET(DSM_MAC_CFG_HDX_BACKPREASSURE, x)
+
+#define DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE         BIT(1)
+#define DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE_SET(x)\
+       FIELD_PREP(DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE, x)
+#define DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE_GET(x)\
+       FIELD_GET(DSM_MAC_CFG_SEND_PAUSE_FRM_TWICE, x)
+
+#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF            BIT(0)
+#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF_SET(x)\
+       FIELD_PREP(DSM_MAC_CFG_TX_PAUSE_XON_XOFF, x)
+#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF_GET(x)\
+       FIELD_GET(DSM_MAC_CFG_TX_PAUSE_XON_XOFF, x)
+
+/*      DSM:CFG:MAC_ADDR_BASE_HIGH_CFG */
+#define DSM_MAC_ADDR_BASE_HIGH_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2700, r, 65, 4)
+
+#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH GENMASK(23, 0)
+#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_SET(x)\
+       FIELD_PREP(DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH, x)
+#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_GET(x)\
+       FIELD_GET(DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH, x)
+
+/*      DSM:CFG:MAC_ADDR_BASE_LOW_CFG */
+#define DSM_MAC_ADDR_BASE_LOW_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2960, r, 65, 4)
+
+#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW   GENMASK(23, 0)
+#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_SET(x)\
+       FIELD_PREP(DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW, x)
+#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_GET(x)\
+       FIELD_GET(DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW, x)
+
+/*      DSM:CFG:TAXI_CAL_CFG */
+#define DSM_TAXI_CAL_CFG(r)       __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 3224, r, 9, 4)
+
+#define DSM_TAXI_CAL_CFG_CAL_IDX                 GENMASK(20, 15)
+#define DSM_TAXI_CAL_CFG_CAL_IDX_SET(x)\
+       FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_IDX, x)
+#define DSM_TAXI_CAL_CFG_CAL_IDX_GET(x)\
+       FIELD_GET(DSM_TAXI_CAL_CFG_CAL_IDX, x)
+
+#define DSM_TAXI_CAL_CFG_CAL_CUR_LEN             GENMASK(14, 9)
+#define DSM_TAXI_CAL_CFG_CAL_CUR_LEN_SET(x)\
+       FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_CUR_LEN, x)
+#define DSM_TAXI_CAL_CFG_CAL_CUR_LEN_GET(x)\
+       FIELD_GET(DSM_TAXI_CAL_CFG_CAL_CUR_LEN, x)
+
+#define DSM_TAXI_CAL_CFG_CAL_CUR_VAL             GENMASK(8, 5)
+#define DSM_TAXI_CAL_CFG_CAL_CUR_VAL_SET(x)\
+       FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_CUR_VAL, x)
+#define DSM_TAXI_CAL_CFG_CAL_CUR_VAL_GET(x)\
+       FIELD_GET(DSM_TAXI_CAL_CFG_CAL_CUR_VAL, x)
+
+#define DSM_TAXI_CAL_CFG_CAL_PGM_VAL             GENMASK(4, 1)
+#define DSM_TAXI_CAL_CFG_CAL_PGM_VAL_SET(x)\
+       FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_PGM_VAL, x)
+#define DSM_TAXI_CAL_CFG_CAL_PGM_VAL_GET(x)\
+       FIELD_GET(DSM_TAXI_CAL_CFG_CAL_PGM_VAL, x)
+
+#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA             BIT(0)
+#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(x)\
+       FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_PGM_ENA, x)
+#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA_GET(x)\
+       FIELD_GET(DSM_TAXI_CAL_CFG_CAL_PGM_ENA, x)
+
+/*      EACL:POL_CFG:POL_EACL_CFG */
+#define EACL_POL_EACL_CFG         __REG(TARGET_EACL, 0, 1, 150608, 0, 1, 780, 768, 0, 1, 4)
+
+#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED BIT(5)
+#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED_SET(x)\
+       FIELD_PREP(EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED, x)
+#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED_GET(x)\
+       FIELD_GET(EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED, x)
+
+#define EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY     BIT(4)
+#define EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY_SET(x)\
+       FIELD_PREP(EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY, x)
+#define EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY_GET(x)\
+       FIELD_GET(EACL_POL_EACL_CFG_EACL_ALLOW_FP_COPY, x)
+
+#define EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY    BIT(3)
+#define EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY_SET(x)\
+       FIELD_PREP(EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY, x)
+#define EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY_GET(x)\
+       FIELD_GET(EACL_POL_EACL_CFG_EACL_ALLOW_CPU_COPY, x)
+
+#define EACL_POL_EACL_CFG_EACL_FORCE_CLOSE       BIT(2)
+#define EACL_POL_EACL_CFG_EACL_FORCE_CLOSE_SET(x)\
+       FIELD_PREP(EACL_POL_EACL_CFG_EACL_FORCE_CLOSE, x)
+#define EACL_POL_EACL_CFG_EACL_FORCE_CLOSE_GET(x)\
+       FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_CLOSE, x)
+
+#define EACL_POL_EACL_CFG_EACL_FORCE_OPEN        BIT(1)
+#define EACL_POL_EACL_CFG_EACL_FORCE_OPEN_SET(x)\
+       FIELD_PREP(EACL_POL_EACL_CFG_EACL_FORCE_OPEN, x)
+#define EACL_POL_EACL_CFG_EACL_FORCE_OPEN_GET(x)\
+       FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_OPEN, x)
+
+#define EACL_POL_EACL_CFG_EACL_FORCE_INIT        BIT(0)
+#define EACL_POL_EACL_CFG_EACL_FORCE_INIT_SET(x)\
+       FIELD_PREP(EACL_POL_EACL_CFG_EACL_FORCE_INIT, x)
+#define EACL_POL_EACL_CFG_EACL_FORCE_INIT_GET(x)\
+       FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_INIT, x)
+
+/*      EACL:RAM_CTRL:RAM_INIT */
+#define EACL_RAM_INIT             __REG(TARGET_EACL, 0, 1, 118736, 0, 1, 4, 0, 0, 1, 4)
+
+#define EACL_RAM_INIT_RAM_INIT                   BIT(1)
+#define EACL_RAM_INIT_RAM_INIT_SET(x)\
+       FIELD_PREP(EACL_RAM_INIT_RAM_INIT, x)
+#define EACL_RAM_INIT_RAM_INIT_GET(x)\
+       FIELD_GET(EACL_RAM_INIT_RAM_INIT, x)
+
+#define EACL_RAM_INIT_RAM_CFG_HOOK               BIT(0)
+#define EACL_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+       FIELD_PREP(EACL_RAM_INIT_RAM_CFG_HOOK, x)
+#define EACL_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+       FIELD_GET(EACL_RAM_INIT_RAM_CFG_HOOK, x)
+
+/*      FDMA:FDMA:FDMA_CH_ACTIVATE */
+#define FDMA_CH_ACTIVATE          __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 0, 0, 1, 4)
+
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE             GENMASK(7, 0)
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\
+       FIELD_PREP(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
+#define FDMA_CH_ACTIVATE_CH_ACTIVATE_GET(x)\
+       FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
+
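/* The FDMA channel controls are one-bit-per-channel masks (GENMASK(7, 0)
 * covers eight channels). A sketch, with spx5_wr() assumed to be the
 * writel()-based accessor from sparx5_main.h: starting channel 'ch'.
 */
static inline void sparx5_fdma_ch_activate(struct sparx5 *sparx5, int ch)
{
	spx5_wr(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(ch)),
		sparx5, FDMA_CH_ACTIVATE);
}
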
+/*      FDMA:FDMA:FDMA_CH_RELOAD */
+#define FDMA_CH_RELOAD            __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 4, 0, 1, 4)
+
+#define FDMA_CH_RELOAD_CH_RELOAD                 GENMASK(7, 0)
+#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\
+       FIELD_PREP(FDMA_CH_RELOAD_CH_RELOAD, x)
+#define FDMA_CH_RELOAD_CH_RELOAD_GET(x)\
+       FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x)
+
+/*      FDMA:FDMA:FDMA_CH_DISABLE */
+#define FDMA_CH_DISABLE           __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 8, 0, 1, 4)
+
+#define FDMA_CH_DISABLE_CH_DISABLE               GENMASK(7, 0)
+#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\
+       FIELD_PREP(FDMA_CH_DISABLE_CH_DISABLE, x)
+#define FDMA_CH_DISABLE_CH_DISABLE_GET(x)\
+       FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x)
+
+/*      FDMA:FDMA:FDMA_DCB_LLP */
+#define FDMA_DCB_LLP(r)           __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 52, r, 8, 4)
+
+/*      FDMA:FDMA:FDMA_DCB_LLP1 */
+#define FDMA_DCB_LLP1(r)          __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 84, r, 8, 4)
+
+/*      FDMA:FDMA:FDMA_DCB_LLP_PREV */
+#define FDMA_DCB_LLP_PREV(r)      __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 116, r, 8, 4)
+
+/*      FDMA:FDMA:FDMA_DCB_LLP_PREV1 */
+#define FDMA_DCB_LLP_PREV1(r)     __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 148, r, 8, 4)
+
+/*      FDMA:FDMA:FDMA_CH_CFG */
+#define FDMA_CH_CFG(r)            __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 224, r, 8, 4)
+
+#define FDMA_CH_CFG_CH_XTR_STATUS_MODE           BIT(7)
+#define FDMA_CH_CFG_CH_XTR_STATUS_MODE_SET(x)\
+       FIELD_PREP(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x)
+#define FDMA_CH_CFG_CH_XTR_STATUS_MODE_GET(x)\
+       FIELD_GET(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x)
+
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY          BIT(6)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(x)\
+       FIELD_PREP(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_GET(x)\
+       FIELD_GET(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+
+#define FDMA_CH_CFG_CH_INJ_PORT                  BIT(5)
+#define FDMA_CH_CFG_CH_INJ_PORT_SET(x)\
+       FIELD_PREP(FDMA_CH_CFG_CH_INJ_PORT, x)
+#define FDMA_CH_CFG_CH_INJ_PORT_GET(x)\
+       FIELD_GET(FDMA_CH_CFG_CH_INJ_PORT, x)
+
+#define FDMA_CH_CFG_CH_DCB_DB_CNT                GENMASK(4, 1)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT_SET(x)\
+       FIELD_PREP(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT_GET(x)\
+       FIELD_GET(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+
+#define FDMA_CH_CFG_CH_MEM                       BIT(0)
+#define FDMA_CH_CFG_CH_MEM_SET(x)\
+       FIELD_PREP(FDMA_CH_CFG_CH_MEM, x)
+#define FDMA_CH_CFG_CH_MEM_GET(x)\
+       FIELD_GET(FDMA_CH_CFG_CH_MEM, x)
+
+/*      FDMA:FDMA:FDMA_CH_TRANSLATE */
+#define FDMA_CH_TRANSLATE(r)      __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 256, r, 8, 4)
+
+#define FDMA_CH_TRANSLATE_OFFSET                 GENMASK(15, 0)
+#define FDMA_CH_TRANSLATE_OFFSET_SET(x)\
+       FIELD_PREP(FDMA_CH_TRANSLATE_OFFSET, x)
+#define FDMA_CH_TRANSLATE_OFFSET_GET(x)\
+       FIELD_GET(FDMA_CH_TRANSLATE_OFFSET, x)
+
+/*      FDMA:FDMA:FDMA_XTR_CFG */
+#define FDMA_XTR_CFG              __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 364, 0, 1, 4)
+
+#define FDMA_XTR_CFG_XTR_FIFO_WM                 GENMASK(15, 11)
+#define FDMA_XTR_CFG_XTR_FIFO_WM_SET(x)\
+       FIELD_PREP(FDMA_XTR_CFG_XTR_FIFO_WM, x)
+#define FDMA_XTR_CFG_XTR_FIFO_WM_GET(x)\
+       FIELD_GET(FDMA_XTR_CFG_XTR_FIFO_WM, x)
+
+#define FDMA_XTR_CFG_XTR_ARB_SAT                 GENMASK(10, 0)
+#define FDMA_XTR_CFG_XTR_ARB_SAT_SET(x)\
+       FIELD_PREP(FDMA_XTR_CFG_XTR_ARB_SAT, x)
+#define FDMA_XTR_CFG_XTR_ARB_SAT_GET(x)\
+       FIELD_GET(FDMA_XTR_CFG_XTR_ARB_SAT, x)
+
+/*      FDMA:FDMA:FDMA_PORT_CTRL */
+#define FDMA_PORT_CTRL(r)         __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 376, r, 2, 4)
+
+#define FDMA_PORT_CTRL_INJ_STOP                  BIT(4)
+#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\
+       FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP, x)
+#define FDMA_PORT_CTRL_INJ_STOP_GET(x)\
+       FIELD_GET(FDMA_PORT_CTRL_INJ_STOP, x)
+
+#define FDMA_PORT_CTRL_INJ_STOP_FORCE            BIT(3)
+#define FDMA_PORT_CTRL_INJ_STOP_FORCE_SET(x)\
+       FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP_FORCE, x)
+#define FDMA_PORT_CTRL_INJ_STOP_FORCE_GET(x)\
+       FIELD_GET(FDMA_PORT_CTRL_INJ_STOP_FORCE, x)
+
+#define FDMA_PORT_CTRL_XTR_STOP                  BIT(2)
+#define FDMA_PORT_CTRL_XTR_STOP_SET(x)\
+       FIELD_PREP(FDMA_PORT_CTRL_XTR_STOP, x)
+#define FDMA_PORT_CTRL_XTR_STOP_GET(x)\
+       FIELD_GET(FDMA_PORT_CTRL_XTR_STOP, x)
+
+#define FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY          BIT(1)
+#define FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_SET(x)\
+       FIELD_PREP(FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY, x)
+#define FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(x)\
+       FIELD_GET(FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY, x)
+
+#define FDMA_PORT_CTRL_XTR_BUF_RST               BIT(0)
+#define FDMA_PORT_CTRL_XTR_BUF_RST_SET(x)\
+       FIELD_PREP(FDMA_PORT_CTRL_XTR_BUF_RST, x)
+#define FDMA_PORT_CTRL_XTR_BUF_RST_GET(x)\
+       FIELD_GET(FDMA_PORT_CTRL_XTR_BUF_RST, x)
+
+/*      FDMA:FDMA:FDMA_INTR_DCB */
+#define FDMA_INTR_DCB             __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 384, 0, 1, 4)
+
+#define FDMA_INTR_DCB_INTR_DCB                   GENMASK(7, 0)
+#define FDMA_INTR_DCB_INTR_DCB_SET(x)\
+       FIELD_PREP(FDMA_INTR_DCB_INTR_DCB, x)
+#define FDMA_INTR_DCB_INTR_DCB_GET(x)\
+       FIELD_GET(FDMA_INTR_DCB_INTR_DCB, x)
+
+/*      FDMA:FDMA:FDMA_INTR_DCB_ENA */
+#define FDMA_INTR_DCB_ENA         __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 388, 0, 1, 4)
+
+#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA           GENMASK(7, 0)
+#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA_SET(x)\
+       FIELD_PREP(FDMA_INTR_DCB_ENA_INTR_DCB_ENA, x)
+#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA_GET(x)\
+       FIELD_GET(FDMA_INTR_DCB_ENA_INTR_DCB_ENA, x)
+
+/*      FDMA:FDMA:FDMA_INTR_DB */
+#define FDMA_INTR_DB              __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 392, 0, 1, 4)
+
+#define FDMA_INTR_DB_INTR_DB                     GENMASK(7, 0)
+#define FDMA_INTR_DB_INTR_DB_SET(x)\
+       FIELD_PREP(FDMA_INTR_DB_INTR_DB, x)
+#define FDMA_INTR_DB_INTR_DB_GET(x)\
+       FIELD_GET(FDMA_INTR_DB_INTR_DB, x)
+
+/*      FDMA:FDMA:FDMA_INTR_DB_ENA */
+#define FDMA_INTR_DB_ENA          __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 396, 0, 1, 4)
+
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA             GENMASK(7, 0)
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\
+       FIELD_PREP(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
+#define FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(x)\
+       FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
+
+/*      FDMA:FDMA:FDMA_INTR_ERR */
+#define FDMA_INTR_ERR             __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 400, 0, 1, 4)
+
+#define FDMA_INTR_ERR_INTR_PORT_ERR              GENMASK(9, 8)
+#define FDMA_INTR_ERR_INTR_PORT_ERR_SET(x)\
+       FIELD_PREP(FDMA_INTR_ERR_INTR_PORT_ERR, x)
+#define FDMA_INTR_ERR_INTR_PORT_ERR_GET(x)\
+       FIELD_GET(FDMA_INTR_ERR_INTR_PORT_ERR, x)
+
+#define FDMA_INTR_ERR_INTR_CH_ERR                GENMASK(7, 0)
+#define FDMA_INTR_ERR_INTR_CH_ERR_SET(x)\
+       FIELD_PREP(FDMA_INTR_ERR_INTR_CH_ERR, x)
+#define FDMA_INTR_ERR_INTR_CH_ERR_GET(x)\
+       FIELD_GET(FDMA_INTR_ERR_INTR_CH_ERR, x)
+
+/*      FDMA:FDMA:FDMA_ERRORS */
+#define FDMA_ERRORS               __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 412, 0, 1, 4)
+
+#define FDMA_ERRORS_ERR_XTR_WR                   GENMASK(31, 30)
+#define FDMA_ERRORS_ERR_XTR_WR_SET(x)\
+       FIELD_PREP(FDMA_ERRORS_ERR_XTR_WR, x)
+#define FDMA_ERRORS_ERR_XTR_WR_GET(x)\
+       FIELD_GET(FDMA_ERRORS_ERR_XTR_WR, x)
+
+#define FDMA_ERRORS_ERR_XTR_OVF                  GENMASK(29, 28)
+#define FDMA_ERRORS_ERR_XTR_OVF_SET(x)\
+       FIELD_PREP(FDMA_ERRORS_ERR_XTR_OVF, x)
+#define FDMA_ERRORS_ERR_XTR_OVF_GET(x)\
+       FIELD_GET(FDMA_ERRORS_ERR_XTR_OVF, x)
+
+#define FDMA_ERRORS_ERR_XTR_TAXI32_OVF           GENMASK(27, 26)
+#define FDMA_ERRORS_ERR_XTR_TAXI32_OVF_SET(x)\
+       FIELD_PREP(FDMA_ERRORS_ERR_XTR_TAXI32_OVF, x)
+#define FDMA_ERRORS_ERR_XTR_TAXI32_OVF_GET(x)\
+       FIELD_GET(FDMA_ERRORS_ERR_XTR_TAXI32_OVF, x)
+
+#define FDMA_ERRORS_ERR_DCB_XTR_DATAL            GENMASK(25, 24)
+#define FDMA_ERRORS_ERR_DCB_XTR_DATAL_SET(x)\
+       FIELD_PREP(FDMA_ERRORS_ERR_DCB_XTR_DATAL, x)
+#define FDMA_ERRORS_ERR_DCB_XTR_DATAL_GET(x)\
+       FIELD_GET(FDMA_ERRORS_ERR_DCB_XTR_DATAL, x)
+
+#define FDMA_ERRORS_ERR_DCB_RD                   GENMASK(23, 16)
+#define FDMA_ERRORS_ERR_DCB_RD_SET(x)\
+       FIELD_PREP(FDMA_ERRORS_ERR_DCB_RD, x)
+#define FDMA_ERRORS_ERR_DCB_RD_GET(x)\
+       FIELD_GET(FDMA_ERRORS_ERR_DCB_RD, x)
+
+#define FDMA_ERRORS_ERR_INJ_RD                   GENMASK(15, 10)
+#define FDMA_ERRORS_ERR_INJ_RD_SET(x)\
+       FIELD_PREP(FDMA_ERRORS_ERR_INJ_RD, x)
+#define FDMA_ERRORS_ERR_INJ_RD_GET(x)\
+       FIELD_GET(FDMA_ERRORS_ERR_INJ_RD, x)
+
+#define FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC          GENMASK(9, 8)
+#define FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC_SET(x)\
+       FIELD_PREP(FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC, x)
+#define FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC_GET(x)\
+       FIELD_GET(FDMA_ERRORS_ERR_INJ_OUT_OF_SYNC, x)
+
+#define FDMA_ERRORS_ERR_CH_WR                    GENMASK(7, 0)
+#define FDMA_ERRORS_ERR_CH_WR_SET(x)\
+       FIELD_PREP(FDMA_ERRORS_ERR_CH_WR, x)
+#define FDMA_ERRORS_ERR_CH_WR_GET(x)\
+       FIELD_GET(FDMA_ERRORS_ERR_CH_WR, x)
+
+/*      FDMA:FDMA:FDMA_ERRORS_2 */
+#define FDMA_ERRORS_2             __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 416, 0, 1, 4)
+
+#define FDMA_ERRORS_2_ERR_XTR_FRAG               GENMASK(1, 0)
+#define FDMA_ERRORS_2_ERR_XTR_FRAG_SET(x)\
+       FIELD_PREP(FDMA_ERRORS_2_ERR_XTR_FRAG, x)
+#define FDMA_ERRORS_2_ERR_XTR_FRAG_GET(x)\
+       FIELD_GET(FDMA_ERRORS_2_ERR_XTR_FRAG, x)
+
+/*      FDMA:FDMA:FDMA_CTRL */
+#define FDMA_CTRL                 __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 424, 0, 1, 4)
+
+#define FDMA_CTRL_NRESET                         BIT(0)
+#define FDMA_CTRL_NRESET_SET(x)\
+       FIELD_PREP(FDMA_CTRL_NRESET, x)
+#define FDMA_CTRL_NRESET_GET(x)\
+       FIELD_GET(FDMA_CTRL_NRESET, x)
+
+/*      DEVCPU_GCB:CHIP_REGS:CHIP_ID */
+#define GCB_CHIP_ID               __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 0, 0, 1, 4)
+
+#define GCB_CHIP_ID_REV_ID                       GENMASK(31, 28)
+#define GCB_CHIP_ID_REV_ID_SET(x)\
+       FIELD_PREP(GCB_CHIP_ID_REV_ID, x)
+#define GCB_CHIP_ID_REV_ID_GET(x)\
+       FIELD_GET(GCB_CHIP_ID_REV_ID, x)
+
+#define GCB_CHIP_ID_PART_ID                      GENMASK(27, 12)
+#define GCB_CHIP_ID_PART_ID_SET(x)\
+       FIELD_PREP(GCB_CHIP_ID_PART_ID, x)
+#define GCB_CHIP_ID_PART_ID_GET(x)\
+       FIELD_GET(GCB_CHIP_ID_PART_ID, x)
+
+#define GCB_CHIP_ID_MFG_ID                       GENMASK(11, 1)
+#define GCB_CHIP_ID_MFG_ID_SET(x)\
+       FIELD_PREP(GCB_CHIP_ID_MFG_ID, x)
+#define GCB_CHIP_ID_MFG_ID_GET(x)\
+       FIELD_GET(GCB_CHIP_ID_MFG_ID, x)
+
+#define GCB_CHIP_ID_ONE                          BIT(0)
+#define GCB_CHIP_ID_ONE_SET(x)\
+       FIELD_PREP(GCB_CHIP_ID_ONE, x)
+#define GCB_CHIP_ID_ONE_GET(x)\
+       FIELD_GET(GCB_CHIP_ID_ONE, x)
+
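/* A read-side sketch: the fields are decoded from a single register read
 * with the _GET accessors. spx5_rd() and the sparx5->dev member are
 * assumptions about the surrounding driver, not part of this patch.
 */
static inline void sparx5_log_chip_id(struct sparx5 *sparx5)
{
	u32 id = spx5_rd(sparx5, GCB_CHIP_ID);

	dev_info(sparx5->dev, "Sparx5: part 0x%04x rev %u\n",
		 GCB_CHIP_ID_PART_ID_GET(id),
		 GCB_CHIP_ID_REV_ID_GET(id));
}
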
+/*      DEVCPU_GCB:CHIP_REGS:SOFT_RST */
+#define GCB_SOFT_RST              __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 8, 0, 1, 4)
+
+#define GCB_SOFT_RST_SOFT_NON_CFG_RST            BIT(2)
+#define GCB_SOFT_RST_SOFT_NON_CFG_RST_SET(x)\
+       FIELD_PREP(GCB_SOFT_RST_SOFT_NON_CFG_RST, x)
+#define GCB_SOFT_RST_SOFT_NON_CFG_RST_GET(x)\
+       FIELD_GET(GCB_SOFT_RST_SOFT_NON_CFG_RST, x)
+
+#define GCB_SOFT_RST_SOFT_SWC_RST                BIT(1)
+#define GCB_SOFT_RST_SOFT_SWC_RST_SET(x)\
+       FIELD_PREP(GCB_SOFT_RST_SOFT_SWC_RST, x)
+#define GCB_SOFT_RST_SOFT_SWC_RST_GET(x)\
+       FIELD_GET(GCB_SOFT_RST_SOFT_SWC_RST, x)
+
+#define GCB_SOFT_RST_SOFT_CHIP_RST               BIT(0)
+#define GCB_SOFT_RST_SOFT_CHIP_RST_SET(x)\
+       FIELD_PREP(GCB_SOFT_RST_SOFT_CHIP_RST, x)
+#define GCB_SOFT_RST_SOFT_CHIP_RST_GET(x)\
+       FIELD_GET(GCB_SOFT_RST_SOFT_CHIP_RST, x)
+
+/*      DEVCPU_GCB:CHIP_REGS:HW_SGPIO_SD_CFG */
+#define GCB_HW_SGPIO_SD_CFG       __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 20, 0, 1, 4)
+
+#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA          BIT(1)
+#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA_SET(x)\
+       FIELD_PREP(GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA, x)
+#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA_GET(x)\
+       FIELD_GET(GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA, x)
+
+#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL           BIT(0)
+#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL_SET(x)\
+       FIELD_PREP(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, x)
+#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL_GET(x)\
+       FIELD_GET(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, x)
+
+/*      DEVCPU_GCB:CHIP_REGS:HW_SGPIO_TO_SD_MAP_CFG */
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG(r) __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 24, r, 65, 4)
+
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL GENMASK(8, 0)
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL_SET(x)\
+       FIELD_PREP(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL_GET(x)\
+       FIELD_GET(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
+
+/*      DEVCPU_GCB:SIO_CTRL:SIO_CLOCK */
+#define GCB_SIO_CLOCK(g)          __REG(TARGET_GCB, 0, 1, 876, g, 3, 280, 20, 0, 1, 4)
+
+#define GCB_SIO_CLOCK_SIO_CLK_FREQ               GENMASK(19, 8)
+#define GCB_SIO_CLOCK_SIO_CLK_FREQ_SET(x)\
+       FIELD_PREP(GCB_SIO_CLOCK_SIO_CLK_FREQ, x)
+#define GCB_SIO_CLOCK_SIO_CLK_FREQ_GET(x)\
+       FIELD_GET(GCB_SIO_CLOCK_SIO_CLK_FREQ, x)
+
+#define GCB_SIO_CLOCK_SYS_CLK_PERIOD             GENMASK(7, 0)
+#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_SET(x)\
+       FIELD_PREP(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x)
+#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_GET(x)\
+       FIELD_GET(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x)
+
+/*      HSCH:HSCH_MISC:SYS_CLK_PER */
+#define HSCH_SYS_CLK_PER          __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 640, 0, 1, 4)
+
+#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS       GENMASK(7, 0)
+#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_SET(x)\
+       FIELD_PREP(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x)
+#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_GET(x)\
+       FIELD_GET(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x)
+
+/*      HSCH:SYSTEM:FLUSH_CTRL */
+#define HSCH_FLUSH_CTRL           __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 4, 0, 1, 4)
+
+#define HSCH_FLUSH_CTRL_FLUSH_ENA                BIT(27)
+#define HSCH_FLUSH_CTRL_FLUSH_ENA_SET(x)\
+       FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_ENA, x)
+#define HSCH_FLUSH_CTRL_FLUSH_ENA_GET(x)\
+       FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_ENA, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_SRC                BIT(26)
+#define HSCH_FLUSH_CTRL_FLUSH_SRC_SET(x)\
+       FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_SRC, x)
+#define HSCH_FLUSH_CTRL_FLUSH_SRC_GET(x)\
+       FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_SRC, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_DST                BIT(25)
+#define HSCH_FLUSH_CTRL_FLUSH_DST_SET(x)\
+       FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_DST, x)
+#define HSCH_FLUSH_CTRL_FLUSH_DST_GET(x)\
+       FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_DST, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_PORT               GENMASK(24, 18)
+#define HSCH_FLUSH_CTRL_FLUSH_PORT_SET(x)\
+       FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_PORT, x)
+#define HSCH_FLUSH_CTRL_FLUSH_PORT_GET(x)\
+       FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_PORT, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_QUEUE              BIT(17)
+#define HSCH_FLUSH_CTRL_FLUSH_QUEUE_SET(x)\
+       FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_QUEUE, x)
+#define HSCH_FLUSH_CTRL_FLUSH_QUEUE_GET(x)\
+       FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_QUEUE, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_SE                 BIT(16)
+#define HSCH_FLUSH_CTRL_FLUSH_SE_SET(x)\
+       FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_SE, x)
+#define HSCH_FLUSH_CTRL_FLUSH_SE_GET(x)\
+       FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_SE, x)
+
+#define HSCH_FLUSH_CTRL_FLUSH_HIER               GENMASK(15, 0)
+#define HSCH_FLUSH_CTRL_FLUSH_HIER_SET(x)\
+       FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
+#define HSCH_FLUSH_CTRL_FLUSH_HIER_GET(x)\
+       FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
+
+/*      HSCH:SYSTEM:PORT_MODE */
+#define HSCH_PORT_MODE(r)         __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 8, r, 70, 4)
+
+#define HSCH_PORT_MODE_DEQUEUE_DIS               BIT(4)
+#define HSCH_PORT_MODE_DEQUEUE_DIS_SET(x)\
+       FIELD_PREP(HSCH_PORT_MODE_DEQUEUE_DIS, x)
+#define HSCH_PORT_MODE_DEQUEUE_DIS_GET(x)\
+       FIELD_GET(HSCH_PORT_MODE_DEQUEUE_DIS, x)
+
+#define HSCH_PORT_MODE_AGE_DIS                   BIT(3)
+#define HSCH_PORT_MODE_AGE_DIS_SET(x)\
+       FIELD_PREP(HSCH_PORT_MODE_AGE_DIS, x)
+#define HSCH_PORT_MODE_AGE_DIS_GET(x)\
+       FIELD_GET(HSCH_PORT_MODE_AGE_DIS, x)
+
+#define HSCH_PORT_MODE_TRUNC_ENA                 BIT(2)
+#define HSCH_PORT_MODE_TRUNC_ENA_SET(x)\
+       FIELD_PREP(HSCH_PORT_MODE_TRUNC_ENA, x)
+#define HSCH_PORT_MODE_TRUNC_ENA_GET(x)\
+       FIELD_GET(HSCH_PORT_MODE_TRUNC_ENA, x)
+
+#define HSCH_PORT_MODE_EIR_REMARK_ENA            BIT(1)
+#define HSCH_PORT_MODE_EIR_REMARK_ENA_SET(x)\
+       FIELD_PREP(HSCH_PORT_MODE_EIR_REMARK_ENA, x)
+#define HSCH_PORT_MODE_EIR_REMARK_ENA_GET(x)\
+       FIELD_GET(HSCH_PORT_MODE_EIR_REMARK_ENA, x)
+
+#define HSCH_PORT_MODE_CPU_PRIO_MODE             BIT(0)
+#define HSCH_PORT_MODE_CPU_PRIO_MODE_SET(x)\
+       FIELD_PREP(HSCH_PORT_MODE_CPU_PRIO_MODE, x)
+#define HSCH_PORT_MODE_CPU_PRIO_MODE_GET(x)\
+       FIELD_GET(HSCH_PORT_MODE_CPU_PRIO_MODE, x)
+
+/*      HSCH:SYSTEM:OUTB_SHARE_ENA */
+#define HSCH_OUTB_SHARE_ENA(r)    __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 288, r, 5, 4)
+
+#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA       GENMASK(7, 0)
+#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(x)\
+       FIELD_PREP(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA, x)
+#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_GET(x)\
+       FIELD_GET(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA, x)
+
+/*      HSCH:MMGT:RESET_CFG */
+#define HSCH_RESET_CFG            __REG(TARGET_HSCH, 0, 1, 162368, 0, 1, 16, 8, 0, 1, 4)
+
+#define HSCH_RESET_CFG_CORE_ENA                  BIT(0)
+#define HSCH_RESET_CFG_CORE_ENA_SET(x)\
+       FIELD_PREP(HSCH_RESET_CFG_CORE_ENA, x)
+#define HSCH_RESET_CFG_CORE_ENA_GET(x)\
+       FIELD_GET(HSCH_RESET_CFG_CORE_ENA, x)
+
+/*      HSCH:TAS_CONFIG:TAS_STATEMACHINE_CFG */
+#define HSCH_TAS_STATEMACHINE_CFG __REG(TARGET_HSCH, 0, 1, 162384, 0, 1, 12, 8, 0, 1, 4)
+
+#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY    GENMASK(7, 0)
+#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_SET(x)\
+       FIELD_PREP(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x)
+#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_GET(x)\
+       FIELD_GET(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x)
+
+/*      LRN:COMMON:COMMON_ACCESS_CTRL */
+#define LRN_COMMON_ACCESS_CTRL    __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 0, 0, 1, 4)
+
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL GENMASK(21, 20)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL_SET(x)\
+       FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL, x)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL_GET(x)\
+       FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL, x)
+
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE BIT(19)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE_SET(x)\
+       FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE, x)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE_GET(x)\
+       FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE, x)
+
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW GENMASK(18, 5)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW_SET(x)\
+       FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW_GET(x)\
+       FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x)
+
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD    GENMASK(4, 1)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(x)\
+       FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD, x)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_GET(x)\
+       FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD, x)
+
+#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT BIT(0)
+#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(x)\
+       FIELD_PREP(LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT, x)
+#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(x)\
+       FIELD_GET(LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT, x)
+
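/* A sketch of the command flow this register implies: write a command code
 * together with the one-shot trigger, then poll the self-clearing SHOT bit.
 * read_poll_timeout() is from <linux/iopoll.h>; spx5_rd()/spx5_wr() are the
 * assumed accessors, and the command encoding itself is hardware-defined
 * and not spelled out in this patch.
 */
static int sparx5_mact_issue_cmd(struct sparx5 *sparx5, u32 cmd)
{
	u32 val;

	spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(cmd) |
		LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
		sparx5, LRN_COMMON_ACCESS_CTRL);

	return read_poll_timeout(spx5_rd, val,
		!LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(val),
		10, 100000, false, sparx5, LRN_COMMON_ACCESS_CTRL);
}
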
+/*      LRN:COMMON:MAC_ACCESS_CFG_0 */
+#define LRN_MAC_ACCESS_CFG_0      __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 4, 0, 1, 4)
+
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID       GENMASK(28, 16)
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID, x)
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID, x)
+
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB   GENMASK(15, 0)
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB, x)
+#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB, x)
+
+/*      LRN:COMMON:MAC_ACCESS_CFG_1 */
+#define LRN_MAC_ACCESS_CFG_1      __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 8, 0, 1, 4)
+
+/*      LRN:COMMON:MAC_ACCESS_CFG_2 */
+#define LRN_MAC_ACCESS_CFG_2      __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 12, 0, 1, 4)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD BIT(28)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL BIT(27)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_NXT_LRN_ALL, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU    GENMASK(26, 24)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_QU, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY  BIT(23)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_CPU_COPY, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE BIT(22)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLAN_IGNORE, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR    BIT(21)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_MIRROR, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG  GENMASK(20, 19)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_FLAG, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL GENMASK(18, 17)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_AGE_INTERVAL, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED    BIT(16)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD       BIT(15)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE GENMASK(14, 12)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE, x)
+
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR      GENMASK(11, 0)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR, x)
+#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR, x)
+
+/*      LRN:COMMON:MAC_ACCESS_CFG_3 */
+#define LRN_MAC_ACCESS_CFG_3      __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 16, 0, 1, 4)
+
+#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX GENMASK(10, 0)
+#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX_SET(x)\
+       FIELD_PREP(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x)
+#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX_GET(x)\
+       FIELD_GET(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x)
+
+/*      LRN:COMMON:SCAN_NEXT_CFG */
+#define LRN_SCAN_NEXT_CFG         __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 20, 0, 1, 4)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL GENMASK(21, 19)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL GENMASK(18, 17)
+#define LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NXT_LRN_ALL_UPDATE_SEL, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL    GENMASK(16, 15)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_AGE_FILTER_SEL, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA BIT(14)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_MOVE_FOUND_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA BIT(13)
+#define LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA, x)
+#define LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_NXT_LRN_ALL_FILTER_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA BIT(12)
+#define LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_USE_PORT_FILTER_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA BIT(11)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_REMOVE_FOUND_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA BIT(10)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA BIT(9)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_INC_AGE_BITS_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA BIT(8)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_AGED_ONLY_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA BIT(7)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK GENMASK(6, 3)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK, x)
+#define LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_SCAN_AGE_INTERVAL_MASK, x)
+
+#define LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA BIT(2)
+#define LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA, x)
+#define LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_ISDX_LIMIT_IDX_FILTER_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_FID_FILTER_ENA         BIT(1)
+#define LRN_SCAN_NEXT_CFG_FID_FILTER_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_FID_FILTER_ENA, x)
+#define LRN_SCAN_NEXT_CFG_FID_FILTER_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_FID_FILTER_ENA, x)
+
+#define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA        BIT(0)
+#define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA, x)
+#define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA, x)
+
+/*      LRN:COMMON:SCAN_NEXT_CFG_1 */
+#define LRN_SCAN_NEXT_CFG_1       __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 24, 0, 1, 4)
+
+#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR   GENMASK(30, 16)
+#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR, x)
+#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR, x)
+
+#define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK GENMASK(14, 0)
+#define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK_SET(x)\
+       FIELD_PREP(LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK, x)
+#define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK_GET(x)\
+       FIELD_GET(LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK, x)
+
+/*      LRN:COMMON:AUTOAGE_CFG */
+#define LRN_AUTOAGE_CFG(r)        __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 36, r, 4, 4)
+
+#define LRN_AUTOAGE_CFG_UNIT_SIZE                GENMASK(29, 28)
+#define LRN_AUTOAGE_CFG_UNIT_SIZE_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_UNIT_SIZE, x)
+#define LRN_AUTOAGE_CFG_UNIT_SIZE_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_UNIT_SIZE, x)
+
+#define LRN_AUTOAGE_CFG_PERIOD_VAL               GENMASK(27, 0)
+#define LRN_AUTOAGE_CFG_PERIOD_VAL_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_PERIOD_VAL, x)
+#define LRN_AUTOAGE_CFG_PERIOD_VAL_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_PERIOD_VAL, x)
+
+/*      LRN:COMMON:AUTOAGE_CFG_1 */
+#define LRN_AUTOAGE_CFG_1         __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 52, 0, 1, 4)
+
+#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA     BIT(25)
+#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA, x)
+#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA, x)
+
+#define LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN GENMASK(24, 15)
+#define LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN, x)
+#define LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_1_CELLS_BETWEEN_ENTRY_SCAN, x)
+
+#define LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS        GENMASK(14, 7)
+#define LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS, x)
+#define LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_1_CLK_PERIOD_01NS, x)
+
+#define LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA    BIT(6)
+#define LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA, x)
+#define LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_1_USE_PORT_FILTER_ENA, x)
+
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT     GENMASK(5, 2)
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT, x)
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_SHOT, x)
+
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT BIT(1)
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT, x)
+#define LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_HW_SCAN_STOP_SHOT, x)
+
+#define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA         BIT(0)
+#define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA, x)
+#define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA, x)
+
+/*      LRN:COMMON:AUTOAGE_CFG_2 */
+#define LRN_AUTOAGE_CFG_2         __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 56, 0, 1, 4)
+
+#define LRN_AUTOAGE_CFG_2_NEXT_ROW               GENMASK(17, 4)
+#define LRN_AUTOAGE_CFG_2_NEXT_ROW_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_2_NEXT_ROW, x)
+#define LRN_AUTOAGE_CFG_2_NEXT_ROW_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_2_NEXT_ROW, x)
+
+#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS    GENMASK(3, 0)
+#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS_SET(x)\
+       FIELD_PREP(LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS, x)
+#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS_GET(x)\
+       FIELD_GET(LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS, x)
+
+/*      PCIE_DM_EP:PF0_ATU_CAP:IATU_REGION_CTRL_2_OFF_OUTBOUND_0 */
+#define PCEP_RCTRL_2_OUT_0        __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 4, 0, 1, 4)
+
+#define PCEP_RCTRL_2_OUT_0_MSG_CODE              GENMASK(7, 0)
+#define PCEP_RCTRL_2_OUT_0_MSG_CODE_SET(x)\
+       FIELD_PREP(PCEP_RCTRL_2_OUT_0_MSG_CODE, x)
+#define PCEP_RCTRL_2_OUT_0_MSG_CODE_GET(x)\
+       FIELD_GET(PCEP_RCTRL_2_OUT_0_MSG_CODE, x)
+
+#define PCEP_RCTRL_2_OUT_0_TAG                   GENMASK(15, 8)
+#define PCEP_RCTRL_2_OUT_0_TAG_SET(x)\
+       FIELD_PREP(PCEP_RCTRL_2_OUT_0_TAG, x)
+#define PCEP_RCTRL_2_OUT_0_TAG_GET(x)\
+       FIELD_GET(PCEP_RCTRL_2_OUT_0_TAG, x)
+
+#define PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN     BIT(16)
+#define PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN_SET(x)\
+       FIELD_PREP(PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN, x)
+#define PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN_GET(x)\
+       FIELD_GET(PCEP_RCTRL_2_OUT_0_TAG_SUBSTITUTE_EN, x)
+
+#define PCEP_RCTRL_2_OUT_0_FUNC_BYPASS           BIT(19)
+#define PCEP_RCTRL_2_OUT_0_FUNC_BYPASS_SET(x)\
+       FIELD_PREP(PCEP_RCTRL_2_OUT_0_FUNC_BYPASS, x)
+#define PCEP_RCTRL_2_OUT_0_FUNC_BYPASS_GET(x)\
+       FIELD_GET(PCEP_RCTRL_2_OUT_0_FUNC_BYPASS, x)
+
+#define PCEP_RCTRL_2_OUT_0_SNP                   BIT(20)
+#define PCEP_RCTRL_2_OUT_0_SNP_SET(x)\
+       FIELD_PREP(PCEP_RCTRL_2_OUT_0_SNP, x)
+#define PCEP_RCTRL_2_OUT_0_SNP_GET(x)\
+       FIELD_GET(PCEP_RCTRL_2_OUT_0_SNP, x)
+
+#define PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD       BIT(22)
+#define PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD_SET(x)\
+       FIELD_PREP(PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD, x)
+#define PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD_GET(x)\
+       FIELD_GET(PCEP_RCTRL_2_OUT_0_INHIBIT_PAYLOAD, x)
+
+#define PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN  BIT(23)
+#define PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN_SET(x)\
+       FIELD_PREP(PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN, x)
+#define PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN_GET(x)\
+       FIELD_GET(PCEP_RCTRL_2_OUT_0_HEADER_SUBSTITUTE_EN, x)
+
+#define PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE        BIT(28)
+#define PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE_SET(x)\
+       FIELD_PREP(PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE, x)
+#define PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE_GET(x)\
+       FIELD_GET(PCEP_RCTRL_2_OUT_0_CFG_SHIFT_MODE, x)
+
+#define PCEP_RCTRL_2_OUT_0_INVERT_MODE           BIT(29)
+#define PCEP_RCTRL_2_OUT_0_INVERT_MODE_SET(x)\
+       FIELD_PREP(PCEP_RCTRL_2_OUT_0_INVERT_MODE, x)
+#define PCEP_RCTRL_2_OUT_0_INVERT_MODE_GET(x)\
+       FIELD_GET(PCEP_RCTRL_2_OUT_0_INVERT_MODE, x)
+
+#define PCEP_RCTRL_2_OUT_0_REGION_EN             BIT(31)
+#define PCEP_RCTRL_2_OUT_0_REGION_EN_SET(x)\
+       FIELD_PREP(PCEP_RCTRL_2_OUT_0_REGION_EN, x)
+#define PCEP_RCTRL_2_OUT_0_REGION_EN_GET(x)\
+       FIELD_GET(PCEP_RCTRL_2_OUT_0_REGION_EN, x)
+
+/*      PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_LWR_OUT_0       __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 8, 0, 1, 4)
+
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW          GENMASK(15, 0)
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW_SET(x)\
+       FIELD_PREP(PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW, x)
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW_GET(x)\
+       FIELD_GET(PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW, x)
+
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW          GENMASK(31, 16)
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW_SET(x)\
+       FIELD_PREP(PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW, x)
+#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW_GET(x)\
+       FIELD_GET(PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW, x)
+
+/*      PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_UPR_OUT_0       __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 12, 0, 1, 4)
+
+/*      PCIE_DM_EP:PF0_ATU_CAP:IATU_LIMIT_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_LIM_OUT_0       __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 16, 0, 1, 4)
+
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW        GENMASK(15, 0)
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW_SET(x)\
+       FIELD_PREP(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW, x)
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW_GET(x)\
+       FIELD_GET(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW, x)
+
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW        GENMASK(31, 16)
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW_SET(x)\
+       FIELD_PREP(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW, x)
+#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW_GET(x)\
+       FIELD_GET(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW, x)
+
+/*      PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_LWR_TGT_OUT_0   __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 20, 0, 1, 4)
+
+/*      PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_UPR_TGT_OUT_0   __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 24, 0, 1, 4)
+
+/*      PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPR_LIMIT_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_UPR_LIM_OUT_0   __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 32, 0, 1, 4)
+
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW GENMASK(1, 0)
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW_SET(x)\
+       FIELD_PREP(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW, x)
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW_GET(x)\
+       FIELD_GET(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW, x)
+
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW GENMASK(31, 2)
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW_SET(x)\
+       FIELD_PREP(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW, x)
+#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW_GET(x)\
+       FIELD_GET(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW, x)
+
+/*      PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
+#define PCS10G_BR_PCS_CFG(t)      __REG(TARGET_PCS10G_BR, t, 12, 0, 0, 1, 56, 0, 0, 1, 4)
+
+#define PCS10G_BR_PCS_CFG_PCS_ENA                BIT(31)
+#define PCS10G_BR_PCS_CFG_PCS_ENA_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_PCS_ENA, x)
+#define PCS10G_BR_PCS_CFG_PCS_ENA_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_PCS_ENA, x)
+
+#define PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA       BIT(30)
+#define PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+#define PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+
+#define PCS10G_BR_PCS_CFG_SH_CNT_MAX             GENMASK(29, 24)
+#define PCS10G_BR_PCS_CFG_SH_CNT_MAX_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_SH_CNT_MAX, x)
+#define PCS10G_BR_PCS_CFG_SH_CNT_MAX_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_SH_CNT_MAX, x)
+
+#define PCS10G_BR_PCS_CFG_RX_DATA_FLIP           BIT(18)
+#define PCS10G_BR_PCS_CFG_RX_DATA_FLIP_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_RX_DATA_FLIP, x)
+#define PCS10G_BR_PCS_CFG_RX_DATA_FLIP_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_RX_DATA_FLIP, x)
+
+#define PCS10G_BR_PCS_CFG_RESYNC_ENA             BIT(15)
+#define PCS10G_BR_PCS_CFG_RESYNC_ENA_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_RESYNC_ENA, x)
+#define PCS10G_BR_PCS_CFG_RESYNC_ENA_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_RESYNC_ENA, x)
+
+#define PCS10G_BR_PCS_CFG_LF_GEN_DIS             BIT(14)
+#define PCS10G_BR_PCS_CFG_LF_GEN_DIS_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_LF_GEN_DIS, x)
+#define PCS10G_BR_PCS_CFG_LF_GEN_DIS_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_LF_GEN_DIS, x)
+
+#define PCS10G_BR_PCS_CFG_RX_TEST_MODE           BIT(13)
+#define PCS10G_BR_PCS_CFG_RX_TEST_MODE_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_RX_TEST_MODE, x)
+#define PCS10G_BR_PCS_CFG_RX_TEST_MODE_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_RX_TEST_MODE, x)
+
+#define PCS10G_BR_PCS_CFG_RX_SCR_DISABLE         BIT(12)
+#define PCS10G_BR_PCS_CFG_RX_SCR_DISABLE_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+#define PCS10G_BR_PCS_CFG_RX_SCR_DISABLE_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+
+#define PCS10G_BR_PCS_CFG_TX_DATA_FLIP           BIT(7)
+#define PCS10G_BR_PCS_CFG_TX_DATA_FLIP_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_TX_DATA_FLIP, x)
+#define PCS10G_BR_PCS_CFG_TX_DATA_FLIP_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_TX_DATA_FLIP, x)
+
+#define PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA       BIT(6)
+#define PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+#define PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+
+#define PCS10G_BR_PCS_CFG_TX_TEST_MODE           BIT(4)
+#define PCS10G_BR_PCS_CFG_TX_TEST_MODE_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_TX_TEST_MODE, x)
+#define PCS10G_BR_PCS_CFG_TX_TEST_MODE_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_TX_TEST_MODE, x)
+
+#define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE         BIT(3)
+#define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+#define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+
+/*      PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
+#define PCS10G_BR_PCS_SD_CFG(t)   __REG(TARGET_PCS10G_BR, t, 12, 0, 0, 1, 56, 4, 0, 1, 4)
+
+#define PCS10G_BR_PCS_SD_CFG_SD_SEL              BIT(8)
+#define PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_SD_CFG_SD_SEL, x)
+#define PCS10G_BR_PCS_SD_CFG_SD_SEL_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_SEL, x)
+
+#define PCS10G_BR_PCS_SD_CFG_SD_POL              BIT(4)
+#define PCS10G_BR_PCS_SD_CFG_SD_POL_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_SD_CFG_SD_POL, x)
+#define PCS10G_BR_PCS_SD_CFG_SD_POL_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_POL, x)
+
+#define PCS10G_BR_PCS_SD_CFG_SD_ENA              BIT(0)
+#define PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(x)\
+       FIELD_PREP(PCS10G_BR_PCS_SD_CFG_SD_ENA, x)
+#define PCS10G_BR_PCS_SD_CFG_SD_ENA_GET(x)\
+       FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_ENA, x)
+
+/*      PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
+#define PCS25G_BR_PCS_CFG(t)      __REG(TARGET_PCS25G_BR, t, 8, 0, 0, 1, 56, 0, 0, 1, 4)
+
+#define PCS25G_BR_PCS_CFG_PCS_ENA                BIT(31)
+#define PCS25G_BR_PCS_CFG_PCS_ENA_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_PCS_ENA, x)
+#define PCS25G_BR_PCS_CFG_PCS_ENA_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_PCS_ENA, x)
+
+#define PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA       BIT(30)
+#define PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+#define PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+
+#define PCS25G_BR_PCS_CFG_SH_CNT_MAX             GENMASK(29, 24)
+#define PCS25G_BR_PCS_CFG_SH_CNT_MAX_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_SH_CNT_MAX, x)
+#define PCS25G_BR_PCS_CFG_SH_CNT_MAX_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_SH_CNT_MAX, x)
+
+#define PCS25G_BR_PCS_CFG_RX_DATA_FLIP           BIT(18)
+#define PCS25G_BR_PCS_CFG_RX_DATA_FLIP_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_RX_DATA_FLIP, x)
+#define PCS25G_BR_PCS_CFG_RX_DATA_FLIP_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_RX_DATA_FLIP, x)
+
+#define PCS25G_BR_PCS_CFG_RESYNC_ENA             BIT(15)
+#define PCS25G_BR_PCS_CFG_RESYNC_ENA_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_RESYNC_ENA, x)
+#define PCS25G_BR_PCS_CFG_RESYNC_ENA_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_RESYNC_ENA, x)
+
+#define PCS25G_BR_PCS_CFG_LF_GEN_DIS             BIT(14)
+#define PCS25G_BR_PCS_CFG_LF_GEN_DIS_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_LF_GEN_DIS, x)
+#define PCS25G_BR_PCS_CFG_LF_GEN_DIS_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_LF_GEN_DIS, x)
+
+#define PCS25G_BR_PCS_CFG_RX_TEST_MODE           BIT(13)
+#define PCS25G_BR_PCS_CFG_RX_TEST_MODE_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_RX_TEST_MODE, x)
+#define PCS25G_BR_PCS_CFG_RX_TEST_MODE_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_RX_TEST_MODE, x)
+
+#define PCS25G_BR_PCS_CFG_RX_SCR_DISABLE         BIT(12)
+#define PCS25G_BR_PCS_CFG_RX_SCR_DISABLE_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+#define PCS25G_BR_PCS_CFG_RX_SCR_DISABLE_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+
+#define PCS25G_BR_PCS_CFG_TX_DATA_FLIP           BIT(7)
+#define PCS25G_BR_PCS_CFG_TX_DATA_FLIP_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_TX_DATA_FLIP, x)
+#define PCS25G_BR_PCS_CFG_TX_DATA_FLIP_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_TX_DATA_FLIP, x)
+
+#define PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA       BIT(6)
+#define PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+#define PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+
+#define PCS25G_BR_PCS_CFG_TX_TEST_MODE           BIT(4)
+#define PCS25G_BR_PCS_CFG_TX_TEST_MODE_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_TX_TEST_MODE, x)
+#define PCS25G_BR_PCS_CFG_TX_TEST_MODE_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_TX_TEST_MODE, x)
+
+#define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE         BIT(3)
+#define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+#define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+
+/*      PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
+#define PCS25G_BR_PCS_SD_CFG(t)   __REG(TARGET_PCS25G_BR, t, 8, 0, 0, 1, 56, 4, 0, 1, 4)
+
+#define PCS25G_BR_PCS_SD_CFG_SD_SEL              BIT(8)
+#define PCS25G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_SD_CFG_SD_SEL, x)
+#define PCS25G_BR_PCS_SD_CFG_SD_SEL_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_SEL, x)
+
+#define PCS25G_BR_PCS_SD_CFG_SD_POL              BIT(4)
+#define PCS25G_BR_PCS_SD_CFG_SD_POL_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_SD_CFG_SD_POL, x)
+#define PCS25G_BR_PCS_SD_CFG_SD_POL_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_POL, x)
+
+#define PCS25G_BR_PCS_SD_CFG_SD_ENA              BIT(0)
+#define PCS25G_BR_PCS_SD_CFG_SD_ENA_SET(x)\
+       FIELD_PREP(PCS25G_BR_PCS_SD_CFG_SD_ENA, x)
+#define PCS25G_BR_PCS_SD_CFG_SD_ENA_GET(x)\
+       FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_ENA, x)
+
+/*      PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
+#define PCS5G_BR_PCS_CFG(t)       __REG(TARGET_PCS5G_BR, t, 13, 0, 0, 1, 56, 0, 0, 1, 4)
+
+#define PCS5G_BR_PCS_CFG_PCS_ENA                 BIT(31)
+#define PCS5G_BR_PCS_CFG_PCS_ENA_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_PCS_ENA, x)
+#define PCS5G_BR_PCS_CFG_PCS_ENA_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_PCS_ENA, x)
+
+#define PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA        BIT(30)
+#define PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+#define PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_PMA_LOOPBACK_ENA, x)
+
+#define PCS5G_BR_PCS_CFG_SH_CNT_MAX              GENMASK(29, 24)
+#define PCS5G_BR_PCS_CFG_SH_CNT_MAX_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_SH_CNT_MAX, x)
+#define PCS5G_BR_PCS_CFG_SH_CNT_MAX_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_SH_CNT_MAX, x)
+
+#define PCS5G_BR_PCS_CFG_RX_DATA_FLIP            BIT(18)
+#define PCS5G_BR_PCS_CFG_RX_DATA_FLIP_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_RX_DATA_FLIP, x)
+#define PCS5G_BR_PCS_CFG_RX_DATA_FLIP_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_RX_DATA_FLIP, x)
+
+#define PCS5G_BR_PCS_CFG_RESYNC_ENA              BIT(15)
+#define PCS5G_BR_PCS_CFG_RESYNC_ENA_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_RESYNC_ENA, x)
+#define PCS5G_BR_PCS_CFG_RESYNC_ENA_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_RESYNC_ENA, x)
+
+#define PCS5G_BR_PCS_CFG_LF_GEN_DIS              BIT(14)
+#define PCS5G_BR_PCS_CFG_LF_GEN_DIS_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_LF_GEN_DIS, x)
+#define PCS5G_BR_PCS_CFG_LF_GEN_DIS_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_LF_GEN_DIS, x)
+
+#define PCS5G_BR_PCS_CFG_RX_TEST_MODE            BIT(13)
+#define PCS5G_BR_PCS_CFG_RX_TEST_MODE_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_RX_TEST_MODE, x)
+#define PCS5G_BR_PCS_CFG_RX_TEST_MODE_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_RX_TEST_MODE, x)
+
+#define PCS5G_BR_PCS_CFG_RX_SCR_DISABLE          BIT(12)
+#define PCS5G_BR_PCS_CFG_RX_SCR_DISABLE_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+#define PCS5G_BR_PCS_CFG_RX_SCR_DISABLE_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_RX_SCR_DISABLE, x)
+
+#define PCS5G_BR_PCS_CFG_TX_DATA_FLIP            BIT(7)
+#define PCS5G_BR_PCS_CFG_TX_DATA_FLIP_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_TX_DATA_FLIP, x)
+#define PCS5G_BR_PCS_CFG_TX_DATA_FLIP_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_TX_DATA_FLIP, x)
+
+#define PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA        BIT(6)
+#define PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+#define PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_AN_LINK_CTRL_ENA, x)
+
+#define PCS5G_BR_PCS_CFG_TX_TEST_MODE            BIT(4)
+#define PCS5G_BR_PCS_CFG_TX_TEST_MODE_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_TX_TEST_MODE, x)
+#define PCS5G_BR_PCS_CFG_TX_TEST_MODE_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_TX_TEST_MODE, x)
+
+#define PCS5G_BR_PCS_CFG_TX_SCR_DISABLE          BIT(3)
+#define PCS5G_BR_PCS_CFG_TX_SCR_DISABLE_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+#define PCS5G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_CFG_TX_SCR_DISABLE, x)
+
+/*      PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
+#define PCS5G_BR_PCS_SD_CFG(t)    __REG(TARGET_PCS5G_BR, t, 13, 0, 0, 1, 56, 4, 0, 1, 4)
+
+#define PCS5G_BR_PCS_SD_CFG_SD_SEL               BIT(8)
+#define PCS5G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_SD_CFG_SD_SEL, x)
+#define PCS5G_BR_PCS_SD_CFG_SD_SEL_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_SEL, x)
+
+#define PCS5G_BR_PCS_SD_CFG_SD_POL               BIT(4)
+#define PCS5G_BR_PCS_SD_CFG_SD_POL_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_SD_CFG_SD_POL, x)
+#define PCS5G_BR_PCS_SD_CFG_SD_POL_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_POL, x)
+
+#define PCS5G_BR_PCS_SD_CFG_SD_ENA               BIT(0)
+#define PCS5G_BR_PCS_SD_CFG_SD_ENA_SET(x)\
+       FIELD_PREP(PCS5G_BR_PCS_SD_CFG_SD_ENA, x)
+#define PCS5G_BR_PCS_SD_CFG_SD_ENA_GET(x)\
+       FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_ENA, x)
+
+/*      PORT_CONF:HW_CFG:DEV5G_MODES */
+#define PORT_CONF_DEV5G_MODES     __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 0, 0, 1, 4)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE      BIT(0)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE      BIT(1)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE      BIT(2)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE      BIT(3)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE      BIT(4)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE      BIT(5)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE      BIT(6)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE      BIT(7)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE      BIT(8)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE      BIT(9)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE     BIT(10)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE     BIT(11)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE, x)
+
+#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE     BIT(12)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE, x)
+#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE, x)
+
+/*      PORT_CONF:HW_CFG:DEV10G_MODES */
+#define PORT_CONF_DEV10G_MODES    __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 4, 0, 1, 4)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE   BIT(0)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE   BIT(1)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE   BIT(2)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE   BIT(3)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE   BIT(4)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE   BIT(5)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE   BIT(6)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE   BIT(7)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE   BIT(8)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE   BIT(9)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE   BIT(10)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE, x)
+
+#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE   BIT(11)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE, x)
+#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE, x)
+
+/*      PORT_CONF:HW_CFG:DEV25G_MODES */
+#define PORT_CONF_DEV25G_MODES    __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 8, 0, 1, 4)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE   BIT(0)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE   BIT(1)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D57_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE   BIT(2)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D58_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE   BIT(3)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D59_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE   BIT(4)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D60_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE   BIT(5)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D61_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE   BIT(6)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D62_MODE, x)
+
+#define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE   BIT(7)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE, x)
+#define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE, x)
+
+/*      PORT_CONF:HW_CFG:QSGMII_ENA */
+#define PORT_CONF_QSGMII_ENA      __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 12, 0, 1, 4)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0        BIT(0)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_0, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_0, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_1        BIT(1)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_1_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_1, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_1_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_1, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_2        BIT(2)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_2_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_2, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_2_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_2, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_3        BIT(3)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_3_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_3, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_3_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_3, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_4        BIT(4)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_4_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_4, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_4_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_4, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5        BIT(5)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_5, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_5, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6        BIT(6)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_6, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_6, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7        BIT(7)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_7, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_7, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8        BIT(8)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_8, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_8, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9        BIT(9)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_9, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_9, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10       BIT(10)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_10, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_10, x)
+
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11       BIT(11)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11_SET(x)\
+       FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_11, x)
+#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11_GET(x)\
+       FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_11, x)
+
+/*      PORT_CONF:USGMII_CFG_STAT:USGMII_CFG */
+#define PORT_CONF_USGMII_CFG(g)   __REG(TARGET_PORT_CONF, 0, 1, 72, g, 6, 8, 0, 0, 1, 4)
+
+#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM        BIT(9)
+#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(x)\
+       FIELD_PREP(PORT_CONF_USGMII_CFG_BYPASS_SCRAM, x)
+#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM_GET(x)\
+       FIELD_GET(PORT_CONF_USGMII_CFG_BYPASS_SCRAM, x)
+
+#define PORT_CONF_USGMII_CFG_BYPASS_DESCRAM      BIT(8)
+#define PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(x)\
+       FIELD_PREP(PORT_CONF_USGMII_CFG_BYPASS_DESCRAM, x)
+#define PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_GET(x)\
+       FIELD_GET(PORT_CONF_USGMII_CFG_BYPASS_DESCRAM, x)
+
+#define PORT_CONF_USGMII_CFG_FLIP_LANES          BIT(7)
+#define PORT_CONF_USGMII_CFG_FLIP_LANES_SET(x)\
+       FIELD_PREP(PORT_CONF_USGMII_CFG_FLIP_LANES, x)
+#define PORT_CONF_USGMII_CFG_FLIP_LANES_GET(x)\
+       FIELD_GET(PORT_CONF_USGMII_CFG_FLIP_LANES, x)
+
+#define PORT_CONF_USGMII_CFG_SHYST_DIS           BIT(6)
+#define PORT_CONF_USGMII_CFG_SHYST_DIS_SET(x)\
+       FIELD_PREP(PORT_CONF_USGMII_CFG_SHYST_DIS, x)
+#define PORT_CONF_USGMII_CFG_SHYST_DIS_GET(x)\
+       FIELD_GET(PORT_CONF_USGMII_CFG_SHYST_DIS, x)
+
+#define PORT_CONF_USGMII_CFG_E_DET_ENA           BIT(5)
+#define PORT_CONF_USGMII_CFG_E_DET_ENA_SET(x)\
+       FIELD_PREP(PORT_CONF_USGMII_CFG_E_DET_ENA, x)
+#define PORT_CONF_USGMII_CFG_E_DET_ENA_GET(x)\
+       FIELD_GET(PORT_CONF_USGMII_CFG_E_DET_ENA, x)
+
+#define PORT_CONF_USGMII_CFG_USE_I1_ENA          BIT(4)
+#define PORT_CONF_USGMII_CFG_USE_I1_ENA_SET(x)\
+       FIELD_PREP(PORT_CONF_USGMII_CFG_USE_I1_ENA, x)
+#define PORT_CONF_USGMII_CFG_USE_I1_ENA_GET(x)\
+       FIELD_GET(PORT_CONF_USGMII_CFG_USE_I1_ENA, x)
+
+#define PORT_CONF_USGMII_CFG_QUAD_MODE           BIT(1)
+#define PORT_CONF_USGMII_CFG_QUAD_MODE_SET(x)\
+       FIELD_PREP(PORT_CONF_USGMII_CFG_QUAD_MODE, x)
+#define PORT_CONF_USGMII_CFG_QUAD_MODE_GET(x)\
+       FIELD_GET(PORT_CONF_USGMII_CFG_QUAD_MODE, x)
+
+/*      QFWD:SYSTEM:SWITCH_PORT_MODE */
+#define QFWD_SWITCH_PORT_MODE(r)  __REG(TARGET_QFWD, 0, 1, 0, 0, 1, 340, 0, r, 70, 4)
+
+#define QFWD_SWITCH_PORT_MODE_PORT_ENA           BIT(19)
+#define QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(x)\
+       FIELD_PREP(QFWD_SWITCH_PORT_MODE_PORT_ENA, x)
+#define QFWD_SWITCH_PORT_MODE_PORT_ENA_GET(x)\
+       FIELD_GET(QFWD_SWITCH_PORT_MODE_PORT_ENA, x)
+
+#define QFWD_SWITCH_PORT_MODE_FWD_URGENCY        GENMASK(18, 10)
+#define QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(x)\
+       FIELD_PREP(QFWD_SWITCH_PORT_MODE_FWD_URGENCY, x)
+#define QFWD_SWITCH_PORT_MODE_FWD_URGENCY_GET(x)\
+       FIELD_GET(QFWD_SWITCH_PORT_MODE_FWD_URGENCY, x)
+
+#define QFWD_SWITCH_PORT_MODE_YEL_RSRVD          GENMASK(9, 6)
+#define QFWD_SWITCH_PORT_MODE_YEL_RSRVD_SET(x)\
+       FIELD_PREP(QFWD_SWITCH_PORT_MODE_YEL_RSRVD, x)
+#define QFWD_SWITCH_PORT_MODE_YEL_RSRVD_GET(x)\
+       FIELD_GET(QFWD_SWITCH_PORT_MODE_YEL_RSRVD, x)
+
+#define QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE  BIT(5)
+#define QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE_SET(x)\
+       FIELD_PREP(QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE, x)
+#define QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE_GET(x)\
+       FIELD_GET(QFWD_SWITCH_PORT_MODE_INGRESS_DROP_MODE, x)
+
+#define QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING     BIT(4)
+#define QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING_SET(x)\
+       FIELD_PREP(QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING, x)
+#define QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING_GET(x)\
+       FIELD_GET(QFWD_SWITCH_PORT_MODE_IGR_NO_SHARING, x)
+
+#define QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING     BIT(3)
+#define QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING_SET(x)\
+       FIELD_PREP(QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING, x)
+#define QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING_GET(x)\
+       FIELD_GET(QFWD_SWITCH_PORT_MODE_EGR_NO_SHARING, x)
+
+#define QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE   BIT(2)
+#define QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE_SET(x)\
+       FIELD_PREP(QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE, x)
+#define QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE_GET(x)\
+       FIELD_GET(QFWD_SWITCH_PORT_MODE_EGRESS_DROP_MODE, x)
+
+#define QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS    BIT(1)
+#define QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS_SET(x)\
+       FIELD_PREP(QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS, x)
+#define QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS_GET(x)\
+       FIELD_GET(QFWD_SWITCH_PORT_MODE_EGRESS_RSRV_DIS, x)
+
+#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE      BIT(0)
+#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE_SET(x)\
+       FIELD_PREP(QFWD_SWITCH_PORT_MODE_LEARNALL_MORE, x)
+#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE_GET(x)\
+       FIELD_GET(QFWD_SWITCH_PORT_MODE_LEARNALL_MORE, x)
+
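Every field in this generated header follows the same accessor idiom: the bare name is the field mask, _SET() places a value with FIELD_PREP(), and _GET() extracts it with FIELD_GET(). A minimal sketch of composing a switch-port-mode value (the spx5_wr() write helper is the one used later in this patch; the function name and the urgency value are purely illustrative):

static void sparx5_port_fwd_sketch(struct sparx5 *sparx5, u32 portno)
{
	/* Enable the port and give it an illustrative forward urgency */
	u32 val = QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
		  QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(104);

	spx5_wr(val, sparx5, QFWD_SWITCH_PORT_MODE(portno));
}
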
+/*      QRES:RES_CTRL:RES_CFG */
+#define QRES_RES_CFG(g)           __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 0, 0, 1, 4)
+
+#define QRES_RES_CFG_WM_HIGH                     GENMASK(11, 0)
+#define QRES_RES_CFG_WM_HIGH_SET(x)\
+       FIELD_PREP(QRES_RES_CFG_WM_HIGH, x)
+#define QRES_RES_CFG_WM_HIGH_GET(x)\
+       FIELD_GET(QRES_RES_CFG_WM_HIGH, x)
+
+/*      QRES:RES_CTRL:RES_STAT */
+#define QRES_RES_STAT(g)          __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 4, 0, 1, 4)
+
+#define QRES_RES_STAT_MAXUSE                     GENMASK(20, 0)
+#define QRES_RES_STAT_MAXUSE_SET(x)\
+       FIELD_PREP(QRES_RES_STAT_MAXUSE, x)
+#define QRES_RES_STAT_MAXUSE_GET(x)\
+       FIELD_GET(QRES_RES_STAT_MAXUSE, x)
+
+/*      QRES:RES_CTRL:RES_STAT_CUR */
+#define QRES_RES_STAT_CUR(g)      __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 8, 0, 1, 4)
+
+#define QRES_RES_STAT_CUR_INUSE                  GENMASK(20, 0)
+#define QRES_RES_STAT_CUR_INUSE_SET(x)\
+       FIELD_PREP(QRES_RES_STAT_CUR_INUSE, x)
+#define QRES_RES_STAT_CUR_INUSE_GET(x)\
+       FIELD_GET(QRES_RES_STAT_CUR_INUSE, x)
+
+/*      DEVCPU_QS:XTR:XTR_GRP_CFG */
+#define QS_XTR_GRP_CFG(r)         __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
+
+#define QS_XTR_GRP_CFG_MODE                      GENMASK(3, 2)
+#define QS_XTR_GRP_CFG_MODE_SET(x)\
+       FIELD_PREP(QS_XTR_GRP_CFG_MODE, x)
+#define QS_XTR_GRP_CFG_MODE_GET(x)\
+       FIELD_GET(QS_XTR_GRP_CFG_MODE, x)
+
+#define QS_XTR_GRP_CFG_STATUS_WORD_POS           BIT(1)
+#define QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(x)\
+       FIELD_PREP(QS_XTR_GRP_CFG_STATUS_WORD_POS, x)
+#define QS_XTR_GRP_CFG_STATUS_WORD_POS_GET(x)\
+       FIELD_GET(QS_XTR_GRP_CFG_STATUS_WORD_POS, x)
+
+#define QS_XTR_GRP_CFG_BYTE_SWAP                 BIT(0)
+#define QS_XTR_GRP_CFG_BYTE_SWAP_SET(x)\
+       FIELD_PREP(QS_XTR_GRP_CFG_BYTE_SWAP, x)
+#define QS_XTR_GRP_CFG_BYTE_SWAP_GET(x)\
+       FIELD_GET(QS_XTR_GRP_CFG_BYTE_SWAP, x)
+
+/*      DEVCPU_QS:XTR:XTR_RD */
+#define QS_XTR_RD(r)              __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 8, r, 2, 4)
+
+/*      DEVCPU_QS:XTR:XTR_FLUSH */
+#define QS_XTR_FLUSH              __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 24, 0, 1, 4)
+
+#define QS_XTR_FLUSH_FLUSH                       GENMASK(1, 0)
+#define QS_XTR_FLUSH_FLUSH_SET(x)\
+       FIELD_PREP(QS_XTR_FLUSH_FLUSH, x)
+#define QS_XTR_FLUSH_FLUSH_GET(x)\
+       FIELD_GET(QS_XTR_FLUSH_FLUSH, x)
+
+/*      DEVCPU_QS:XTR:XTR_DATA_PRESENT */
+#define QS_XTR_DATA_PRESENT       __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 28, 0, 1, 4)
+
+#define QS_XTR_DATA_PRESENT_DATA_PRESENT         GENMASK(1, 0)
+#define QS_XTR_DATA_PRESENT_DATA_PRESENT_SET(x)\
+       FIELD_PREP(QS_XTR_DATA_PRESENT_DATA_PRESENT, x)
+#define QS_XTR_DATA_PRESENT_DATA_PRESENT_GET(x)\
+       FIELD_GET(QS_XTR_DATA_PRESENT_DATA_PRESENT, x)
+
+/*      DEVCPU_QS:INJ:INJ_GRP_CFG */
+#define QS_INJ_GRP_CFG(r)         __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 0, r, 2, 4)
+
+#define QS_INJ_GRP_CFG_MODE                      GENMASK(3, 2)
+#define QS_INJ_GRP_CFG_MODE_SET(x)\
+       FIELD_PREP(QS_INJ_GRP_CFG_MODE, x)
+#define QS_INJ_GRP_CFG_MODE_GET(x)\
+       FIELD_GET(QS_INJ_GRP_CFG_MODE, x)
+
+#define QS_INJ_GRP_CFG_BYTE_SWAP                 BIT(0)
+#define QS_INJ_GRP_CFG_BYTE_SWAP_SET(x)\
+       FIELD_PREP(QS_INJ_GRP_CFG_BYTE_SWAP, x)
+#define QS_INJ_GRP_CFG_BYTE_SWAP_GET(x)\
+       FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x)
+
+/*      DEVCPU_QS:INJ:INJ_WR */
+#define QS_INJ_WR(r)              __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 8, r, 2, 4)
+
+/*      DEVCPU_QS:INJ:INJ_CTRL */
+#define QS_INJ_CTRL(r)            __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 16, r, 2, 4)
+
+#define QS_INJ_CTRL_GAP_SIZE                     GENMASK(24, 21)
+#define QS_INJ_CTRL_GAP_SIZE_SET(x)\
+       FIELD_PREP(QS_INJ_CTRL_GAP_SIZE, x)
+#define QS_INJ_CTRL_GAP_SIZE_GET(x)\
+       FIELD_GET(QS_INJ_CTRL_GAP_SIZE, x)
+
+#define QS_INJ_CTRL_ABORT                        BIT(20)
+#define QS_INJ_CTRL_ABORT_SET(x)\
+       FIELD_PREP(QS_INJ_CTRL_ABORT, x)
+#define QS_INJ_CTRL_ABORT_GET(x)\
+       FIELD_GET(QS_INJ_CTRL_ABORT, x)
+
+#define QS_INJ_CTRL_EOF                          BIT(19)
+#define QS_INJ_CTRL_EOF_SET(x)\
+       FIELD_PREP(QS_INJ_CTRL_EOF, x)
+#define QS_INJ_CTRL_EOF_GET(x)\
+       FIELD_GET(QS_INJ_CTRL_EOF, x)
+
+#define QS_INJ_CTRL_SOF                          BIT(18)
+#define QS_INJ_CTRL_SOF_SET(x)\
+       FIELD_PREP(QS_INJ_CTRL_SOF, x)
+#define QS_INJ_CTRL_SOF_GET(x)\
+       FIELD_GET(QS_INJ_CTRL_SOF, x)
+
+#define QS_INJ_CTRL_VLD_BYTES                    GENMASK(17, 16)
+#define QS_INJ_CTRL_VLD_BYTES_SET(x)\
+       FIELD_PREP(QS_INJ_CTRL_VLD_BYTES, x)
+#define QS_INJ_CTRL_VLD_BYTES_GET(x)\
+       FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x)
+
+/*      DEVCPU_QS:INJ:INJ_STATUS */
+#define QS_INJ_STATUS             __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 24, 0, 1, 4)
+
+#define QS_INJ_STATUS_WMARK_REACHED              GENMASK(5, 4)
+#define QS_INJ_STATUS_WMARK_REACHED_SET(x)\
+       FIELD_PREP(QS_INJ_STATUS_WMARK_REACHED, x)
+#define QS_INJ_STATUS_WMARK_REACHED_GET(x)\
+       FIELD_GET(QS_INJ_STATUS_WMARK_REACHED, x)
+
+#define QS_INJ_STATUS_FIFO_RDY                   GENMASK(3, 2)
+#define QS_INJ_STATUS_FIFO_RDY_SET(x)\
+       FIELD_PREP(QS_INJ_STATUS_FIFO_RDY, x)
+#define QS_INJ_STATUS_FIFO_RDY_GET(x)\
+       FIELD_GET(QS_INJ_STATUS_FIFO_RDY, x)
+
+#define QS_INJ_STATUS_INJ_IN_PROGRESS            GENMASK(1, 0)
+#define QS_INJ_STATUS_INJ_IN_PROGRESS_SET(x)\
+       FIELD_PREP(QS_INJ_STATUS_INJ_IN_PROGRESS, x)
+#define QS_INJ_STATUS_INJ_IN_PROGRESS_GET(x)\
+       FIELD_GET(QS_INJ_STATUS_INJ_IN_PROGRESS, x)
+
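The injection status fields hold one bit per injection group (two groups, hence the two-bit masks). Before pushing a frame, software is expected to check that the group's FIFO is ready. A plausible poll, assuming a read helper spx5_rd() symmetric to the spx5_wr() used later in this patch:

static bool sparx5_inj_ready_sketch(struct sparx5 *sparx5, u8 grp)
{
	u32 val = spx5_rd(sparx5, QS_INJ_STATUS); /* spx5_rd() assumed */

	/* FIFO_RDY carries one ready bit per injection group */
	return !!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp));
}
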
+/*      QSYS:PAUSE_CFG:PAUSE_CFG */
+#define QSYS_PAUSE_CFG(r)         __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 0, r, 70, 4)
+
+#define QSYS_PAUSE_CFG_PAUSE_START               GENMASK(25, 14)
+#define QSYS_PAUSE_CFG_PAUSE_START_SET(x)\
+       FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_START, x)
+#define QSYS_PAUSE_CFG_PAUSE_START_GET(x)\
+       FIELD_GET(QSYS_PAUSE_CFG_PAUSE_START, x)
+
+#define QSYS_PAUSE_CFG_PAUSE_STOP                GENMASK(13, 2)
+#define QSYS_PAUSE_CFG_PAUSE_STOP_SET(x)\
+       FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_STOP, x)
+#define QSYS_PAUSE_CFG_PAUSE_STOP_GET(x)\
+       FIELD_GET(QSYS_PAUSE_CFG_PAUSE_STOP, x)
+
+#define QSYS_PAUSE_CFG_PAUSE_ENA                 BIT(1)
+#define QSYS_PAUSE_CFG_PAUSE_ENA_SET(x)\
+       FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_ENA, x)
+#define QSYS_PAUSE_CFG_PAUSE_ENA_GET(x)\
+       FIELD_GET(QSYS_PAUSE_CFG_PAUSE_ENA, x)
+
+#define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA   BIT(0)
+#define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA_SET(x)\
+       FIELD_PREP(QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA, x)
+#define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA_GET(x)\
+       FIELD_GET(QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA, x)
+
+/*      QSYS:PAUSE_CFG:ATOP */
+#define QSYS_ATOP(r)              __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 284, r, 70, 4)
+
+#define QSYS_ATOP_ATOP                           GENMASK(11, 0)
+#define QSYS_ATOP_ATOP_SET(x)\
+       FIELD_PREP(QSYS_ATOP_ATOP, x)
+#define QSYS_ATOP_ATOP_GET(x)\
+       FIELD_GET(QSYS_ATOP_ATOP, x)
+
+/*      QSYS:PAUSE_CFG:FWD_PRESSURE */
+#define QSYS_FWD_PRESSURE(r)      __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 564, r, 70, 4)
+
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE           GENMASK(11, 1)
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE_SET(x)\
+       FIELD_PREP(QSYS_FWD_PRESSURE_FWD_PRESSURE, x)
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE_GET(x)\
+       FIELD_GET(QSYS_FWD_PRESSURE_FWD_PRESSURE, x)
+
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS       BIT(0)
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(x)\
+       FIELD_PREP(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS, x)
+#define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_GET(x)\
+       FIELD_GET(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS, x)
+
+/*      QSYS:PAUSE_CFG:ATOP_TOT_CFG */
+#define QSYS_ATOP_TOT_CFG         __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 844, 0, 1, 4)
+
+#define QSYS_ATOP_TOT_CFG_ATOP_TOT               GENMASK(11, 0)
+#define QSYS_ATOP_TOT_CFG_ATOP_TOT_SET(x)\
+       FIELD_PREP(QSYS_ATOP_TOT_CFG_ATOP_TOT, x)
+#define QSYS_ATOP_TOT_CFG_ATOP_TOT_GET(x)\
+       FIELD_GET(QSYS_ATOP_TOT_CFG_ATOP_TOT, x)
+
+/*      QSYS:CALCFG:CAL_AUTO */
+#define QSYS_CAL_AUTO(r)          __REG(TARGET_QSYS, 0, 1, 2304, 0, 1, 40, 0, r, 7, 4)
+
+#define QSYS_CAL_AUTO_CAL_AUTO                   GENMASK(29, 0)
+#define QSYS_CAL_AUTO_CAL_AUTO_SET(x)\
+       FIELD_PREP(QSYS_CAL_AUTO_CAL_AUTO, x)
+#define QSYS_CAL_AUTO_CAL_AUTO_GET(x)\
+       FIELD_GET(QSYS_CAL_AUTO_CAL_AUTO, x)
+
+/*      QSYS:CALCFG:CAL_CTRL */
+#define QSYS_CAL_CTRL             __REG(TARGET_QSYS, 0, 1, 2304, 0, 1, 40, 36, 0, 1, 4)
+
+#define QSYS_CAL_CTRL_CAL_MODE                   GENMASK(14, 11)
+#define QSYS_CAL_CTRL_CAL_MODE_SET(x)\
+       FIELD_PREP(QSYS_CAL_CTRL_CAL_MODE, x)
+#define QSYS_CAL_CTRL_CAL_MODE_GET(x)\
+       FIELD_GET(QSYS_CAL_CTRL_CAL_MODE, x)
+
+#define QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE        GENMASK(10, 1)
+#define QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_SET(x)\
+       FIELD_PREP(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE, x)
+#define QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_GET(x)\
+       FIELD_GET(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE, x)
+
+#define QSYS_CAL_CTRL_CAL_AUTO_ERROR             BIT(0)
+#define QSYS_CAL_CTRL_CAL_AUTO_ERROR_SET(x)\
+       FIELD_PREP(QSYS_CAL_CTRL_CAL_AUTO_ERROR, x)
+#define QSYS_CAL_CTRL_CAL_AUTO_ERROR_GET(x)\
+       FIELD_GET(QSYS_CAL_CTRL_CAL_AUTO_ERROR, x)
+
+/*      QSYS:RAM_CTRL:RAM_INIT */
+#define QSYS_RAM_INIT             __REG(TARGET_QSYS, 0, 1, 2344, 0, 1, 4, 0, 0, 1, 4)
+
+#define QSYS_RAM_INIT_RAM_INIT                   BIT(1)
+#define QSYS_RAM_INIT_RAM_INIT_SET(x)\
+       FIELD_PREP(QSYS_RAM_INIT_RAM_INIT, x)
+#define QSYS_RAM_INIT_RAM_INIT_GET(x)\
+       FIELD_GET(QSYS_RAM_INIT_RAM_INIT, x)
+
+#define QSYS_RAM_INIT_RAM_CFG_HOOK               BIT(0)
+#define QSYS_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+       FIELD_PREP(QSYS_RAM_INIT_RAM_CFG_HOOK, x)
+#define QSYS_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+       FIELD_GET(QSYS_RAM_INIT_RAM_CFG_HOOK, x)
+
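The same RAM_INIT/RAM_CFG_HOOK pair recurs for the REW, VCAP_SUPER and VOP targets below. The usual pattern for such self-clearing init bits is to set RAM_INIT and poll until the hardware clears it; a sketch of that handshake (assumed, not taken verbatim from this patch; spx5_rd() is the presumed read counterpart of spx5_wr()):

static void sparx5_qsys_ram_init_sketch(struct sparx5 *sparx5)
{
	spx5_wr(QSYS_RAM_INIT_RAM_INIT_SET(1), sparx5, QSYS_RAM_INIT);

	/* RAM_INIT is expected to self-clear when initialization is done */
	while (QSYS_RAM_INIT_RAM_INIT_GET(spx5_rd(sparx5, QSYS_RAM_INIT)))
		cpu_relax();
}
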
+/*      REW:COMMON:OWN_UPSID */
+#define REW_OWN_UPSID(r)          __REG(TARGET_REW, 0, 1, 387264, 0, 1, 1232, 0, r, 3, 4)
+
+#define REW_OWN_UPSID_OWN_UPSID                  GENMASK(4, 0)
+#define REW_OWN_UPSID_OWN_UPSID_SET(x)\
+       FIELD_PREP(REW_OWN_UPSID_OWN_UPSID, x)
+#define REW_OWN_UPSID_OWN_UPSID_GET(x)\
+       FIELD_GET(REW_OWN_UPSID_OWN_UPSID, x)
+
+/*      REW:PORT:PORT_VLAN_CFG */
+#define REW_PORT_VLAN_CFG(g)      __REG(TARGET_REW, 0, 1, 360448, g, 70, 256, 0, 0, 1, 4)
+
+#define REW_PORT_VLAN_CFG_PORT_PCP               GENMASK(15, 13)
+#define REW_PORT_VLAN_CFG_PORT_PCP_SET(x)\
+       FIELD_PREP(REW_PORT_VLAN_CFG_PORT_PCP, x)
+#define REW_PORT_VLAN_CFG_PORT_PCP_GET(x)\
+       FIELD_GET(REW_PORT_VLAN_CFG_PORT_PCP, x)
+
+#define REW_PORT_VLAN_CFG_PORT_DEI               BIT(12)
+#define REW_PORT_VLAN_CFG_PORT_DEI_SET(x)\
+       FIELD_PREP(REW_PORT_VLAN_CFG_PORT_DEI, x)
+#define REW_PORT_VLAN_CFG_PORT_DEI_GET(x)\
+       FIELD_GET(REW_PORT_VLAN_CFG_PORT_DEI, x)
+
+#define REW_PORT_VLAN_CFG_PORT_VID               GENMASK(11, 0)
+#define REW_PORT_VLAN_CFG_PORT_VID_SET(x)\
+       FIELD_PREP(REW_PORT_VLAN_CFG_PORT_VID, x)
+#define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\
+       FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x)
+
+/*      REW:PORT:TAG_CTRL */
+#define REW_TAG_CTRL(g)           __REG(TARGET_REW, 0, 1, 360448, g, 70, 256, 132, 0, 1, 4)
+
+#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED     BIT(13)
+#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED_SET(x)\
+       FIELD_PREP(REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED, x)
+#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED_GET(x)\
+       FIELD_GET(REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED, x)
+
+#define REW_TAG_CTRL_TAG_CFG                     GENMASK(12, 11)
+#define REW_TAG_CTRL_TAG_CFG_SET(x)\
+       FIELD_PREP(REW_TAG_CTRL_TAG_CFG, x)
+#define REW_TAG_CTRL_TAG_CFG_GET(x)\
+       FIELD_GET(REW_TAG_CTRL_TAG_CFG, x)
+
+#define REW_TAG_CTRL_TAG_TPID_CFG                GENMASK(10, 8)
+#define REW_TAG_CTRL_TAG_TPID_CFG_SET(x)\
+       FIELD_PREP(REW_TAG_CTRL_TAG_TPID_CFG, x)
+#define REW_TAG_CTRL_TAG_TPID_CFG_GET(x)\
+       FIELD_GET(REW_TAG_CTRL_TAG_TPID_CFG, x)
+
+#define REW_TAG_CTRL_TAG_VID_CFG                 GENMASK(7, 6)
+#define REW_TAG_CTRL_TAG_VID_CFG_SET(x)\
+       FIELD_PREP(REW_TAG_CTRL_TAG_VID_CFG, x)
+#define REW_TAG_CTRL_TAG_VID_CFG_GET(x)\
+       FIELD_GET(REW_TAG_CTRL_TAG_VID_CFG, x)
+
+#define REW_TAG_CTRL_TAG_PCP_CFG                 GENMASK(5, 3)
+#define REW_TAG_CTRL_TAG_PCP_CFG_SET(x)\
+       FIELD_PREP(REW_TAG_CTRL_TAG_PCP_CFG, x)
+#define REW_TAG_CTRL_TAG_PCP_CFG_GET(x)\
+       FIELD_GET(REW_TAG_CTRL_TAG_PCP_CFG, x)
+
+#define REW_TAG_CTRL_TAG_DEI_CFG                 GENMASK(2, 0)
+#define REW_TAG_CTRL_TAG_DEI_CFG_SET(x)\
+       FIELD_PREP(REW_TAG_CTRL_TAG_DEI_CFG, x)
+#define REW_TAG_CTRL_TAG_DEI_CFG_GET(x)\
+       FIELD_GET(REW_TAG_CTRL_TAG_DEI_CFG, x)
+
+/*      REW:RAM_CTRL:RAM_INIT */
+#define REW_RAM_INIT              __REG(TARGET_REW, 0, 1, 378696, 0, 1, 4, 0, 0, 1, 4)
+
+#define REW_RAM_INIT_RAM_INIT                    BIT(1)
+#define REW_RAM_INIT_RAM_INIT_SET(x)\
+       FIELD_PREP(REW_RAM_INIT_RAM_INIT, x)
+#define REW_RAM_INIT_RAM_INIT_GET(x)\
+       FIELD_GET(REW_RAM_INIT_RAM_INIT, x)
+
+#define REW_RAM_INIT_RAM_CFG_HOOK                BIT(0)
+#define REW_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+       FIELD_PREP(REW_RAM_INIT_RAM_CFG_HOOK, x)
+#define REW_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+       FIELD_GET(REW_RAM_INIT_RAM_CFG_HOOK, x)
+
+/*      VCAP_SUPER:RAM_CTRL:RAM_INIT */
+#define VCAP_SUPER_RAM_INIT       __REG(TARGET_VCAP_SUPER, 0, 1, 1120, 0, 1, 4, 0, 0, 1, 4)
+
+#define VCAP_SUPER_RAM_INIT_RAM_INIT             BIT(1)
+#define VCAP_SUPER_RAM_INIT_RAM_INIT_SET(x)\
+       FIELD_PREP(VCAP_SUPER_RAM_INIT_RAM_INIT, x)
+#define VCAP_SUPER_RAM_INIT_RAM_INIT_GET(x)\
+       FIELD_GET(VCAP_SUPER_RAM_INIT_RAM_INIT, x)
+
+#define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK         BIT(0)
+#define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+       FIELD_PREP(VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK, x)
+#define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+       FIELD_GET(VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK, x)
+
+/*      VOP:RAM_CTRL:RAM_INIT */
+#define VOP_RAM_INIT              __REG(TARGET_VOP, 0, 1, 279176, 0, 1, 4, 0, 0, 1, 4)
+
+#define VOP_RAM_INIT_RAM_INIT                    BIT(1)
+#define VOP_RAM_INIT_RAM_INIT_SET(x)\
+       FIELD_PREP(VOP_RAM_INIT_RAM_INIT, x)
+#define VOP_RAM_INIT_RAM_INIT_GET(x)\
+       FIELD_GET(VOP_RAM_INIT_RAM_INIT, x)
+
+#define VOP_RAM_INIT_RAM_CFG_HOOK                BIT(0)
+#define VOP_RAM_INIT_RAM_CFG_HOOK_SET(x)\
+       FIELD_PREP(VOP_RAM_INIT_RAM_CFG_HOOK, x)
+#define VOP_RAM_INIT_RAM_CFG_HOOK_GET(x)\
+       FIELD_GET(VOP_RAM_INIT_RAM_CFG_HOOK, x)
+
+/*      XQS:SYSTEM:STAT_CFG */
+#define XQS_STAT_CFG              __REG(TARGET_XQS, 0, 1, 6768, 0, 1, 872, 860, 0, 1, 4)
+
+#define XQS_STAT_CFG_STAT_CLEAR_SHOT             GENMASK(21, 18)
+#define XQS_STAT_CFG_STAT_CLEAR_SHOT_SET(x)\
+       FIELD_PREP(XQS_STAT_CFG_STAT_CLEAR_SHOT, x)
+#define XQS_STAT_CFG_STAT_CLEAR_SHOT_GET(x)\
+       FIELD_GET(XQS_STAT_CFG_STAT_CLEAR_SHOT, x)
+
+#define XQS_STAT_CFG_STAT_VIEW                   GENMASK(17, 5)
+#define XQS_STAT_CFG_STAT_VIEW_SET(x)\
+       FIELD_PREP(XQS_STAT_CFG_STAT_VIEW, x)
+#define XQS_STAT_CFG_STAT_VIEW_GET(x)\
+       FIELD_GET(XQS_STAT_CFG_STAT_VIEW, x)
+
+#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY           BIT(4)
+#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY_SET(x)\
+       FIELD_PREP(XQS_STAT_CFG_STAT_SRV_PKT_ONLY, x)
+#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY_GET(x)\
+       FIELD_GET(XQS_STAT_CFG_STAT_SRV_PKT_ONLY, x)
+
+#define XQS_STAT_CFG_STAT_WRAP_DIS               GENMASK(3, 0)
+#define XQS_STAT_CFG_STAT_WRAP_DIS_SET(x)\
+       FIELD_PREP(XQS_STAT_CFG_STAT_WRAP_DIS, x)
+#define XQS_STAT_CFG_STAT_WRAP_DIS_GET(x)\
+       FIELD_GET(XQS_STAT_CFG_STAT_WRAP_DIS, x)
+
+/*      XQS:QLIMIT_SHR:QLIMIT_SHR_TOP_CFG */
+#define XQS_QLIMIT_SHR_TOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 0, 0, 1, 4)
+
+#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP    GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP_SET(x)\
+       FIELD_PREP(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x)
+#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP_GET(x)\
+       FIELD_GET(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x)
+
+/*      XQS:QLIMIT_SHR:QLIMIT_SHR_ATOP_CFG */
+#define XQS_QLIMIT_SHR_ATOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 4, 0, 1, 4)
+
+#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP  GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP_SET(x)\
+       FIELD_PREP(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x)
+#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP_GET(x)\
+       FIELD_GET(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x)
+
+/*      XQS:QLIMIT_SHR:QLIMIT_SHR_CTOP_CFG */
+#define XQS_QLIMIT_SHR_CTOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 8, 0, 1, 4)
+
+#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP  GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP_SET(x)\
+       FIELD_PREP(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x)
+#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP_GET(x)\
+       FIELD_GET(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x)
+
+/*      XQS:QLIMIT_SHR:QLIMIT_SHR_QLIM_CFG */
+#define XQS_QLIMIT_SHR_QLIM_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 12, 0, 1, 4)
+
+#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM  GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM_SET(x)\
+       FIELD_PREP(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x)
+#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM_GET(x)\
+       FIELD_GET(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x)
+
+/*      XQS:STAT:CNT */
+#define XQS_CNT(g)                __REG(TARGET_XQS, 0, 1, 0, g, 1024, 4, 0, 0, 1, 4)
+
+#endif /* _SPARX5_MAIN_REGS_H_ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
new file mode 100644 (file)
index 0000000..9d485a9
--- /dev/null
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+
+/* The IFH bit position of the first VSTAX bit. This offset is needed
+ * because the VSTAX bit positions in the datasheet start from zero.
+ */
+#define VSTAX 73
+
+static void ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
+{
+       u8 *ifh_hdr = ifh;
+       /* Calculate the IFH byte that holds the start of this bit position */
+       u32 byte = (35 - (pos / 8));
+       /* Calculate the starting bit position within that byte */
+       u32 bit  = (pos % 8);
+       u64 encode = GENMASK(bit + width - 1, bit) & (value << bit);
+
+       /* Max width is 5 bytes (40 bits). In the worst case this will
+        * spread over 6 bytes (48 bits).
+        */
+       compiletime_assert(width <= 40, "Unsupported width, must be <= 40");
+
+       /* Bits 0-7 go into the start IFH byte */
+       if (encode & 0xFF)
+               ifh_hdr[byte] |= (u8)((encode & 0xFF));
+       /* Bits 8-15 go into the next (more significant) IFH byte */
+       if (encode & 0xFF00)
+               ifh_hdr[byte - 1] |= (u8)((encode & 0xFF00) >> 8);
+       /* Bits 16-23 go into the next IFH byte */
+       if (encode & 0xFF0000)
+               ifh_hdr[byte - 2] |= (u8)((encode & 0xFF0000) >> 16);
+       /* Bits 24-31 go into the next IFH byte */
+       if (encode & 0xFF000000)
+               ifh_hdr[byte - 3] |= (u8)((encode & 0xFF000000) >> 24);
+       /* Bits 32-39 go into the next IFH byte */
+       if (encode & 0xFF00000000)
+               ifh_hdr[byte - 4] |= (u8)((encode & 0xFF00000000) >> 32);
+       /* Bits 40-47 go into the next IFH byte */
+       if (encode & 0xFF0000000000)
+               ifh_hdr[byte - 5] |= (u8)((encode & 0xFF0000000000) >> 40);
+}
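A worked trace of the arithmetic above, using the MISC.CPU_MASK/DPORT encode from sparx5_set_port_ifh() below:

/* pos = 29, width = 8:
 *   byte = 35 - (29 / 8) = 32
 *   bit  = 29 % 8        = 5
 * so bits 0-2 of the value land in ifh_hdr[32] and bits 3-7 in
 * ifh_hdr[31]; byte 0 of the IFH is the most significant byte.
 */
ifh_encode_bitfield(ifh_hdr, portno, 29, 8);
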
+
+static void sparx5_set_port_ifh(void *ifh_hdr, u16 portno)
+{
+       /* VSTAX.RSV = 1. MSBit must be 1 */
+       ifh_encode_bitfield(ifh_hdr, 1, VSTAX + 79,  1);
+       /* VSTAX.INGR_DROP_MODE = Enable. Avoid head-of-line blocking */
+       ifh_encode_bitfield(ifh_hdr, 1, VSTAX + 55,  1);
+       /* MISC.CPU_MASK/DPORT = Destination port */
+       ifh_encode_bitfield(ifh_hdr, portno,   29, 8);
+       /* MISC.PIPELINE_PT */
+       ifh_encode_bitfield(ifh_hdr, 16,       37, 5);
+       /* MISC.PIPELINE_ACT */
+       ifh_encode_bitfield(ifh_hdr, 1,        42, 3);
+       /* FWD.SRC_PORT = CPU */
+       ifh_encode_bitfield(ifh_hdr, SPX5_PORT_CPU, 46, 7);
+       /* FWD.SFLOW_ID (disable SFlow sampling) */
+       ifh_encode_bitfield(ifh_hdr, 124,      57, 7);
+       /* FWD.UPDATE_FCS = Enable. Enforce update of FCS. */
+       ifh_encode_bitfield(ifh_hdr, 1,        67, 1);
+}
+
+static int sparx5_port_open(struct net_device *ndev)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+       int err = 0;
+
+       sparx5_port_enable(port, true);
+       err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
+       if (err) {
+               netdev_err(ndev, "Could not attach to PHY\n");
+               return err;
+       }
+
+       phylink_start(port->phylink);
+
+       if (!ndev->phydev) {
+               /* power up serdes */
+               port->conf.power_down = false;
+               if (port->conf.serdes_reset)
+                       err = sparx5_serdes_set(port->sparx5, port, &port->conf);
+               else
+                       err = phy_power_on(port->serdes);
+               if (err)
+                       netdev_err(ndev, "%s failed\n", __func__);
+       }
+
+       return err;
+}
+
+static int sparx5_port_stop(struct net_device *ndev)
+{
+       struct sparx5_port *port = netdev_priv(ndev);
+       int err = 0;
+
+       sparx5_port_enable(port, false);
+       phylink_stop(port->phylink);
+       phylink_disconnect_phy(port->phylink);
+
+       if (!ndev->phydev) {
+               /* power down serdes */
+               port->conf.power_down = true;
+               if (port->conf.serdes_reset)
+                       err = sparx5_serdes_set(port->sparx5, port, &port->conf);
+               else
+                       err = phy_power_off(port->serdes);
+               if (err)
+                       netdev_err(ndev, "%s failed\n", __func__);
+       }
+       /* The error is only logged; ndo_stop is not expected to fail */
+       return 0;
+}
+
+static void sparx5_set_rx_mode(struct net_device *dev)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+       struct sparx5 *sparx5 = port->sparx5;
+
+       if (!test_bit(port->portno, sparx5->bridge_mask))
+               __dev_mc_sync(dev, sparx5_mc_sync, sparx5_mc_unsync);
+}
+
+static int sparx5_port_get_phys_port_name(struct net_device *dev,
+                                         char *buf, size_t len)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+       int ret;
+
+       ret = snprintf(buf, len, "p%d", port->portno);
+       if (ret >= len)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int sparx5_set_mac_address(struct net_device *dev, void *p)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+       struct sparx5 *sparx5 = port->sparx5;
+       const struct sockaddr *addr = p;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       /* Remove the current address from the MAC table */
+       sparx5_mact_forget(sparx5, dev->dev_addr, port->pvid);
+
+       /* Add the new address, learned towards the CPU port group */
+       sparx5_mact_learn(sparx5, PGID_CPU, addr->sa_data, port->pvid);
+
+       /* Record the address */
+       ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+       return 0;
+}
+
+static int sparx5_get_port_parent_id(struct net_device *dev,
+                                    struct netdev_phys_item_id *ppid)
+{
+       struct sparx5_port *sparx5_port = netdev_priv(dev);
+       struct sparx5 *sparx5 = sparx5_port->sparx5;
+
+       ppid->id_len = sizeof(sparx5->base_mac);
+       memcpy(&ppid->id, &sparx5->base_mac, ppid->id_len);
+
+       return 0;
+}
+
+static const struct net_device_ops sparx5_port_netdev_ops = {
+       .ndo_open               = sparx5_port_open,
+       .ndo_stop               = sparx5_port_stop,
+       .ndo_start_xmit         = sparx5_port_xmit_impl,
+       .ndo_set_rx_mode        = sparx5_set_rx_mode,
+       .ndo_get_phys_port_name = sparx5_port_get_phys_port_name,
+       .ndo_set_mac_address    = sparx5_set_mac_address,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_get_stats64        = sparx5_get_stats64,
+       .ndo_get_port_parent_id = sparx5_get_port_parent_id,
+};
+
+bool sparx5_netdevice_check(const struct net_device *dev)
+{
+       return dev && (dev->netdev_ops == &sparx5_port_netdev_ops);
+}
+
+struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno)
+{
+       struct sparx5_port *spx5_port;
+       struct net_device *ndev;
+       u64 val;
+
+       ndev = devm_alloc_etherdev(sparx5->dev, sizeof(struct sparx5_port));
+       if (!ndev)
+               return ERR_PTR(-ENOMEM);
+
+       SET_NETDEV_DEV(ndev, sparx5->dev);
+       spx5_port = netdev_priv(ndev);
+       spx5_port->ndev = ndev;
+       spx5_port->sparx5 = sparx5;
+       spx5_port->portno = portno;
+       sparx5_set_port_ifh(spx5_port->ifh, portno);
+
+       ndev->netdev_ops = &sparx5_port_netdev_ops;
+       ndev->ethtool_ops = &sparx5_ethtool_ops;
+
+       val = ether_addr_to_u64(sparx5->base_mac) + portno + 1;
+       u64_to_ether_addr(val, ndev->dev_addr);
+
+       return ndev;
+}
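
For reference, a standalone sketch (not part of the patch) of the per-port MAC
derivation above: each port address is the switch base MAC plus portno + 1.
The helper names and the sample base MAC are made up; the driver itself uses
the kernel's ether_addr_to_u64()/u64_to_ether_addr().

#include <stdint.h>
#include <stdio.h>

/* Big-endian 6-byte MAC <-> u64 conversion, mirroring the kernel helpers */
static uint64_t mac_to_u64(const uint8_t *addr)
{
        uint64_t v = 0;
        int i;

        for (i = 0; i < 6; i++)
                v = (v << 8) | addr[i];
        return v;
}

static void u64_to_mac(uint64_t v, uint8_t *addr)
{
        int i;

        for (i = 5; i >= 0; i--) {
                addr[i] = v & 0xff;
                v >>= 8;
        }
}

int main(void)
{
        uint8_t base[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x00 }; /* made up */
        uint8_t port_mac[6];
        uint32_t portno = 2;

        u64_to_mac(mac_to_u64(base) + portno + 1, port_mac);
        printf("port %u MAC ends in %02x\n", portno, port_mac[5]); /* 03 */
        return 0;
}
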
+
+int sparx5_register_netdevs(struct sparx5 *sparx5)
+{
+       int portno;
+       int err;
+
+       for (portno = 0; portno < SPX5_PORTS; portno++)
+               if (sparx5->ports[portno]) {
+                       err = register_netdev(sparx5->ports[portno]->ndev);
+                       if (err) {
+                               dev_err(sparx5->dev,
+                                       "port: %02u: netdev registration failed\n",
+                                       portno);
+                               return err;
+                       }
+                       sparx5_port_inj_timer_setup(sparx5->ports[portno]);
+               }
+       return 0;
+}
+
+void sparx5_destroy_netdevs(struct sparx5 *sparx5)
+{
+       struct sparx5_port *port;
+       int portno;
+
+       for (portno = 0; portno < SPX5_PORTS; portno++) {
+               port = sparx5->ports[portno];
+               if (port && port->phylink) {
+                       /* Disconnect the phy */
+                       rtnl_lock();
+                       sparx5_port_stop(port->ndev);
+                       phylink_disconnect_phy(port->phylink);
+                       rtnl_unlock();
+                       phylink_destroy(port->phylink);
+                       port->phylink = NULL;
+               }
+       }
+}
+
+void sparx5_unregister_netdevs(struct sparx5 *sparx5)
+{
+       int portno;
+
+       for (portno = 0; portno < SPX5_PORTS; portno++)
+               if (sparx5->ports[portno])
+                       unregister_netdev(sparx5->ports[portno]->ndev);
+}
+
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
new file mode 100644 (file)
index 0000000..09ca7a3
--- /dev/null
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+#define XTR_EOF_0     ntohl((__force __be32)0x80000000u)
+#define XTR_EOF_1     ntohl((__force __be32)0x80000001u)
+#define XTR_EOF_2     ntohl((__force __be32)0x80000002u)
+#define XTR_EOF_3     ntohl((__force __be32)0x80000003u)
+#define XTR_PRUNED    ntohl((__force __be32)0x80000004u)
+#define XTR_ABORT     ntohl((__force __be32)0x80000005u)
+#define XTR_ESCAPE    ntohl((__force __be32)0x80000006u)
+#define XTR_NOT_READY ntohl((__force __be32)0x80000007u)
+
+#define XTR_VALID_BYTES(x)      (4 - ((x) & 3))
+
+#define INJ_TIMEOUT_NS 50000
+
+struct frame_info {
+       int src_port;
+};
+
+static void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp)
+{
+       /* Start flush */
+       spx5_wr(QS_XTR_FLUSH_FLUSH_SET(BIT(grp)), sparx5, QS_XTR_FLUSH);
+
+       /* Allow the queue to drain */
+       mdelay(1);
+
+       /* All Queues normal */
+       spx5_wr(0, sparx5, QS_XTR_FLUSH);
+}
+
+static void sparx5_ifh_parse(u32 *ifh, struct frame_info *info)
+{
+       u8 *xtr_hdr = (u8 *)ifh;
+
+       /* FWD is bits 45-72 (28 bits), but we only read the 27 LSBs for now */
+       u32 fwd =
+               ((u32)xtr_hdr[27] << 24) |
+               ((u32)xtr_hdr[28] << 16) |
+               ((u32)xtr_hdr[29] <<  8) |
+               ((u32)xtr_hdr[30] <<  0);
+       fwd = (fwd >> 5);
+       info->src_port = FIELD_GET(GENMASK(7, 1), fwd);
+}
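
As a standalone illustration (not from the patch) of the source-port
extraction above: four header bytes are assembled big-endian, the 5 LSBs below
the FWD field are dropped, and bits 7:1 of the result carry the port number
(FIELD_GET(GENMASK(7, 1), fwd)). The sample header bytes are made up.

#include <stdint.h>
#include <stdio.h>

static int ifh_src_port(const uint8_t *xtr_hdr)
{
        uint32_t fwd = ((uint32_t)xtr_hdr[27] << 24) |
                       ((uint32_t)xtr_hdr[28] << 16) |
                       ((uint32_t)xtr_hdr[29] << 8) |
                       ((uint32_t)xtr_hdr[30] << 0);

        fwd >>= 5;                /* drop bits below the FWD field */
        return (fwd >> 1) & 0x7f; /* FIELD_GET(GENMASK(7, 1), fwd) */
}

int main(void)
{
        uint8_t hdr[36] = { 0 };

        hdr[29] = 0x01; /* 0x100 >> 5 = 8; bits 7:1 of 8 give port 4 */
        printf("src port %d\n", ifh_src_port(hdr));
        return 0;
}
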
+
+static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
+{
+       bool eof_flag = false, pruned_flag = false, abort_flag = false;
+       struct net_device *netdev;
+       struct sparx5_port *port;
+       struct frame_info fi;
+       int i, byte_cnt = 0;
+       struct sk_buff *skb;
+       u32 ifh[IFH_LEN];
+       u32 *rxbuf;
+
+       /* Get IFH */
+       for (i = 0; i < IFH_LEN; i++)
+               ifh[i] = spx5_rd(sparx5, QS_XTR_RD(grp));
+
+       /* Decode IFH (what's needed) */
+       sparx5_ifh_parse(ifh, &fi);
+
+       /* Map to port netdev */
+       port = fi.src_port < SPX5_PORTS ?
+               sparx5->ports[fi.src_port] : NULL;
+       if (!port || !port->ndev) {
+               dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
+               sparx5_xtr_flush(sparx5, grp);
+               return;
+       }
+
+       /* Have netdev, get skb */
+       netdev = port->ndev;
+       skb = netdev_alloc_skb(netdev, netdev->mtu + ETH_HLEN);
+       if (!skb) {
+               sparx5_xtr_flush(sparx5, grp);
+               dev_err(sparx5->dev, "No skb allocated\n");
+               netdev->stats.rx_dropped++;
+               return;
+       }
+       rxbuf = (u32 *)skb->data;
+
+       /* Now, pull frame data */
+       while (!eof_flag) {
+               u32 val = spx5_rd(sparx5, QS_XTR_RD(grp));
+               u32 cmp = val;
+
+               if (byte_swap)
+                       cmp = ntohl((__force __be32)val);
+
+               switch (cmp) {
+               case XTR_NOT_READY:
+                       break;
+               case XTR_ABORT:
+                       /* No accompanying data */
+                       abort_flag = true;
+                       eof_flag = true;
+                       break;
+               case XTR_EOF_0:
+               case XTR_EOF_1:
+               case XTR_EOF_2:
+               case XTR_EOF_3:
+                       /* This assumes STATUS_WORD_POS == 1, Status
+                        * just after last data
+                        */
+                       byte_cnt -= (4 - XTR_VALID_BYTES(val));
+                       eof_flag = true;
+                       break;
+               case XTR_PRUNED:
+                       /* But get the last 4 bytes as well */
+                       eof_flag = true;
+                       pruned_flag = true;
+                       fallthrough;
+               case XTR_ESCAPE:
+                       *rxbuf = spx5_rd(sparx5, QS_XTR_RD(grp));
+                       byte_cnt += 4;
+                       rxbuf++;
+                       break;
+               default:
+                       *rxbuf = val;
+                       byte_cnt += 4;
+                       rxbuf++;
+               }
+       }
+
+       if (abort_flag || pruned_flag || !eof_flag) {
+               netdev_err(netdev, "Discarded frame: abort:%d pruned:%d eof:%d\n",
+                          abort_flag, pruned_flag, eof_flag);
+               kfree_skb(skb);
+               netdev->stats.rx_dropped++;
+               return;
+       }
+
+       /* Everything we see on an interface that is in the HW bridge
+        * has already been forwarded
+        */
+       if (test_bit(port->portno, sparx5->bridge_mask))
+               skb->offload_fwd_mark = 1;
+
+       /* Finish up skb */
+       skb_put(skb, byte_cnt - ETH_FCS_LEN);
+       eth_skb_pad(skb);
+       skb->protocol = eth_type_trans(skb, netdev);
+       /* Bump stats before netif_rx() since it consumes the skb */
+       netdev->stats.rx_bytes += skb->len;
+       netdev->stats.rx_packets++;
+       netif_rx(skb);
+}
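
For reference, a standalone sketch (not part of the patch) of the extraction
byte accounting above: every data word read adds 4 bytes, and the EOF status
code (EOF_0..EOF_3) says how many bytes of the final word were valid, so the
invalid remainder is subtracted again. The three-word frame is a made-up
example.

#include <stdio.h>

#define XTR_VALID_BYTES(x) (4 - ((x) & 3))

int main(void)
{
        int byte_cnt = 0, eof_code, w;

        for (w = 0; w < 3; w++) /* three data words pulled from the FIFO */
                byte_cnt += 4;

        eof_code = 1;           /* XTR_EOF_1: 3 of the last 4 bytes valid */
        byte_cnt -= 4 - XTR_VALID_BYTES(eof_code);

        printf("frame length: %d bytes\n", byte_cnt); /* 11 */
        return 0;
}
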
+
+static int sparx5_inject(struct sparx5 *sparx5,
+                        u32 *ifh,
+                        struct sk_buff *skb,
+                        struct net_device *ndev)
+{
+       int grp = INJ_QUEUE;
+       u32 val, w, count;
+       u8 *buf;
+
+       val = spx5_rd(sparx5, QS_INJ_STATUS);
+       if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp))) {
+               pr_err_ratelimited("Injection: Queue not ready: 0x%lx\n",
+                                  QS_INJ_STATUS_FIFO_RDY_GET(val));
+               return -EBUSY;
+       }
+
+       /* Indicate SOF */
+       spx5_wr(QS_INJ_CTRL_SOF_SET(1) |
+               QS_INJ_CTRL_GAP_SIZE_SET(1),
+               sparx5, QS_INJ_CTRL(grp));
+
+       /* Write the IFH to the chip. */
+       for (w = 0; w < IFH_LEN; w++)
+               spx5_wr(ifh[w], sparx5, QS_INJ_WR(grp));
+
+       /* Write words, round up */
+       count = DIV_ROUND_UP(skb->len, 4);
+       buf = skb->data;
+       for (w = 0; w < count; w++, buf += 4) {
+               val = get_unaligned((const u32 *)buf);
+               spx5_wr(val, sparx5, QS_INJ_WR(grp));
+       }
+
+       /* Add padding */
+       while (w < (60 / 4)) {
+               spx5_wr(0, sparx5, QS_INJ_WR(grp));
+               w++;
+       }
+
+       /* Indicate EOF and valid bytes in last word */
+       spx5_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
+               QS_INJ_CTRL_VLD_BYTES_SET(skb->len < 60 ? 0 : skb->len % 4) |
+               QS_INJ_CTRL_EOF_SET(1),
+               sparx5, QS_INJ_CTRL(grp));
+
+       /* Add dummy CRC */
+       spx5_wr(0, sparx5, QS_INJ_WR(grp));
+       w++;
+
+       val = spx5_rd(sparx5, QS_INJ_STATUS);
+       if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
+               struct sparx5_port *port = netdev_priv(ndev);
+
+               pr_err_ratelimited("Injection: Watermark reached: 0x%lx\n",
+                                  QS_INJ_STATUS_WMARK_REACHED_GET(val));
+               netif_stop_queue(ndev);
+               hrtimer_start(&port->inj_timer, INJ_TIMEOUT_NS,
+                             HRTIMER_MODE_REL);
+       }
+
+       return NETDEV_TX_OK;
+}
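
A minimal sketch (not part of the patch) of the injection word accounting
above: the frame is written as 32-bit words, short frames are padded up to the
60-byte Ethernet minimum, and the EOF control word carries the number of valid
bytes in the last word (0 meaning all four). The 65-byte length is a made-up
example.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int len = 65;
        unsigned int words = DIV_ROUND_UP(len, 4);
        unsigned int vld = len < 60 ? 0 : len % 4;

        while (words < 60 / 4) /* pad short frames with zero words */
                words++;

        printf("%u data words, %u valid bytes in the last word\n", words, vld);
        return 0;
}
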
+
+int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
+{
+       struct net_device_stats *stats = &dev->stats;
+       struct sparx5_port *port = netdev_priv(dev);
+       struct sparx5 *sparx5 = port->sparx5;
+       int ret;
+
+       ret = sparx5_inject(sparx5, port->ifh, skb, dev);
+
+       if (ret == NETDEV_TX_OK) {
+               stats->tx_bytes += skb->len;
+               stats->tx_packets++;
+               skb_tx_timestamp(skb);
+               dev_kfree_skb_any(skb);
+       } else {
+               stats->tx_dropped++;
+       }
+       return ret;
+}
+
+static enum hrtimer_restart sparx5_injection_timeout(struct hrtimer *tmr)
+{
+       struct sparx5_port *port = container_of(tmr, struct sparx5_port,
+                                               inj_timer);
+       int grp = INJ_QUEUE;
+       u32 val;
+
+       val = spx5_rd(port->sparx5, QS_INJ_STATUS);
+       if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
+               pr_err_ratelimited("Injection: Reset watermark count\n");
+               /* Reset Watermark count to restart */
+               spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
+                        DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
+                        port->sparx5,
+                        DSM_DEV_TX_STOP_WM_CFG(port->portno));
+       }
+       netif_wake_queue(port->ndev);
+       return HRTIMER_NORESTART;
+}
+
+int sparx5_manual_injection_mode(struct sparx5 *sparx5)
+{
+       const int byte_swap = 1;
+       int portno;
+
+       /* Change mode to manual extraction and injection */
+       spx5_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
+               QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) |
+               QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap),
+               sparx5, QS_XTR_GRP_CFG(XTR_QUEUE));
+       spx5_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
+               QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap),
+               sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));
+
+       /* CPU ports capture setup */
+       for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) {
+               /* ASM CPU port: No preamble, IFH, enable padding */
+               spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
+                       ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
+                       ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */
+                       sparx5, ASM_PORT_CFG(portno));
+
+               /* Reset WM cnt to unclog queued frames */
+               spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
+                        DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
+                        sparx5,
+                        DSM_DEV_TX_STOP_WM_CFG(portno));
+
+               /* Set Disassembler Stop Watermark level */
+               spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(0),
+                        DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
+                        sparx5,
+                        DSM_DEV_TX_STOP_WM_CFG(portno));
+
+               /* Enable Disassembler buffer underrun watchdog */
+               spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(0),
+                        DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS,
+                        sparx5,
+                        DSM_BUF_CFG(portno));
+       }
+       return 0;
+}
+
+irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5)
+{
+       struct sparx5 *s5 = _sparx5;
+       int poll = 64;
+
+       /* Check data in queue */
+       while (spx5_rd(s5, QS_XTR_DATA_PRESENT) & BIT(XTR_QUEUE) && poll-- > 0)
+               sparx5_xtr_grp(s5, XTR_QUEUE, false);
+
+       return IRQ_HANDLED;
+}
+
+void sparx5_port_inj_timer_setup(struct sparx5_port *port)
+{
+       hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       port->inj_timer.function = sparx5_injection_timeout;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
new file mode 100644 (file)
index 0000000..af70e27
--- /dev/null
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/module.h>
+#include <linux/phylink.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/sfp.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+
+static bool port_conf_has_changed(struct sparx5_port_config *a, struct sparx5_port_config *b)
+{
+       if (a->speed != b->speed ||
+           a->portmode != b->portmode ||
+           a->autoneg != b->autoneg ||
+           a->pause_adv != b->pause_adv ||
+           a->power_down != b->power_down ||
+           a->media != b->media)
+               return true;
+       return false;
+}
+
+static void sparx5_phylink_validate(struct phylink_config *config,
+                                   unsigned long *supported,
+                                   struct phylink_link_state *state)
+{
+       struct sparx5_port *port = netdev_priv(to_net_dev(config->dev));
+       __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+       phylink_set(mask, Autoneg);
+       phylink_set_port_modes(mask);
+       phylink_set(mask, Pause);
+       phylink_set(mask, Asym_Pause);
+
+       switch (state->interface) {
+       case PHY_INTERFACE_MODE_5GBASER:
+       case PHY_INTERFACE_MODE_10GBASER:
+       case PHY_INTERFACE_MODE_25GBASER:
+       case PHY_INTERFACE_MODE_NA:
+               if (port->conf.bandwidth == SPEED_5000)
+                       phylink_set(mask, 5000baseT_Full);
+               if (port->conf.bandwidth == SPEED_10000) {
+                       phylink_set(mask, 5000baseT_Full);
+                       phylink_set(mask, 10000baseT_Full);
+                       phylink_set(mask, 10000baseCR_Full);
+                       phylink_set(mask, 10000baseSR_Full);
+                       phylink_set(mask, 10000baseLR_Full);
+                       phylink_set(mask, 10000baseLRM_Full);
+                       phylink_set(mask, 10000baseER_Full);
+               }
+               if (port->conf.bandwidth == SPEED_25000) {
+                       phylink_set(mask, 5000baseT_Full);
+                       phylink_set(mask, 10000baseT_Full);
+                       phylink_set(mask, 10000baseCR_Full);
+                       phylink_set(mask, 10000baseSR_Full);
+                       phylink_set(mask, 10000baseLR_Full);
+                       phylink_set(mask, 10000baseLRM_Full);
+                       phylink_set(mask, 10000baseER_Full);
+                       phylink_set(mask, 25000baseCR_Full);
+                       phylink_set(mask, 25000baseSR_Full);
+               }
+               if (state->interface != PHY_INTERFACE_MODE_NA)
+                       break;
+               fallthrough;
+       case PHY_INTERFACE_MODE_SGMII:
+       case PHY_INTERFACE_MODE_QSGMII:
+               phylink_set(mask, 10baseT_Half);
+               phylink_set(mask, 10baseT_Full);
+               phylink_set(mask, 100baseT_Half);
+               phylink_set(mask, 100baseT_Full);
+               phylink_set(mask, 1000baseT_Full);
+               phylink_set(mask, 1000baseX_Full);
+               if (state->interface != PHY_INTERFACE_MODE_NA)
+                       break;
+               fallthrough;
+       case PHY_INTERFACE_MODE_1000BASEX:
+       case PHY_INTERFACE_MODE_2500BASEX:
+               if (state->interface != PHY_INTERFACE_MODE_2500BASEX) {
+                       phylink_set(mask, 1000baseT_Full);
+                       phylink_set(mask, 1000baseX_Full);
+               }
+               if (state->interface == PHY_INTERFACE_MODE_2500BASEX ||
+                   state->interface == PHY_INTERFACE_MODE_NA) {
+                       phylink_set(mask, 2500baseT_Full);
+                       phylink_set(mask, 2500baseX_Full);
+               }
+               break;
+       default:
+               bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+               return;
+       }
+       bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
+       bitmap_and(state->advertising, state->advertising, mask,
+                  __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static void sparx5_phylink_mac_config(struct phylink_config *config,
+                                     unsigned int mode,
+                                     const struct phylink_link_state *state)
+{
+       /* Currently not used */
+}
+
+static void sparx5_phylink_mac_link_up(struct phylink_config *config,
+                                      struct phy_device *phy,
+                                      unsigned int mode,
+                                      phy_interface_t interface,
+                                      int speed, int duplex,
+                                      bool tx_pause, bool rx_pause)
+{
+       struct sparx5_port *port = netdev_priv(to_net_dev(config->dev));
+       struct sparx5_port_config conf;
+       int err;
+
+       conf = port->conf;
+       conf.duplex = duplex;
+       conf.pause = 0;
+       conf.pause |= tx_pause ? MLO_PAUSE_TX : 0;
+       conf.pause |= rx_pause ? MLO_PAUSE_RX : 0;
+       conf.speed = speed;
+       /* Configure the port to speed/duplex/pause */
+       err = sparx5_port_config(port->sparx5, port, &conf);
+       if (err)
+               netdev_err(port->ndev, "port config failed: %d\n", err);
+}
+
+static void sparx5_phylink_mac_link_down(struct phylink_config *config,
+                                        unsigned int mode,
+                                        phy_interface_t interface)
+{
+       /* Currently not used */
+}
+
+static struct sparx5_port *sparx5_pcs_to_port(struct phylink_pcs *pcs)
+{
+       return container_of(pcs, struct sparx5_port, phylink_pcs);
+}
+
+static void sparx5_pcs_get_state(struct phylink_pcs *pcs,
+                                struct phylink_link_state *state)
+{
+       struct sparx5_port *port = sparx5_pcs_to_port(pcs);
+       struct sparx5_port_status status;
+
+       sparx5_get_port_status(port->sparx5, port, &status);
+       state->link = status.link && !status.link_down;
+       state->an_complete = status.an_complete;
+       state->speed = status.speed;
+       state->duplex = status.duplex;
+       state->pause = status.pause;
+}
+
+static int sparx5_pcs_config(struct phylink_pcs *pcs,
+                            unsigned int mode,
+                            phy_interface_t interface,
+                            const unsigned long *advertising,
+                            bool permit_pause_to_mac)
+{
+       struct sparx5_port *port = sparx5_pcs_to_port(pcs);
+       struct sparx5_port_config conf;
+       int ret = 0;
+
+       conf = port->conf;
+       conf.power_down = false;
+       conf.portmode = interface;
+       conf.inband = phylink_autoneg_inband(mode);
+       conf.autoneg = phylink_test(advertising, Autoneg);
+       conf.pause_adv = 0;
+       if (phylink_test(advertising, Pause))
+               conf.pause_adv |= ADVERTISE_1000XPAUSE;
+       if (phylink_test(advertising, Asym_Pause))
+               conf.pause_adv |= ADVERTISE_1000XPSE_ASYM;
+       if (sparx5_is_baser(interface)) {
+               if (phylink_test(advertising, FIBRE))
+                       conf.media = PHY_MEDIA_SR;
+               else
+                       conf.media = PHY_MEDIA_DAC;
+       }
+       if (!port_conf_has_changed(&port->conf, &conf))
+               return ret;
+       /* Enable the PCS matching this interface type */
+       ret = sparx5_port_pcs_set(port->sparx5, port, &conf);
+       if (ret)
+               netdev_err(port->ndev, "port PCS config failed: %d\n", ret);
+       return ret;
+}
+
+static void sparx5_pcs_aneg_restart(struct phylink_pcs *pcs)
+{
+       /* Currently not used */
+}
+
+const struct phylink_pcs_ops sparx5_phylink_pcs_ops = {
+       .pcs_get_state = sparx5_pcs_get_state,
+       .pcs_config = sparx5_pcs_config,
+       .pcs_an_restart = sparx5_pcs_aneg_restart,
+};
+
+const struct phylink_mac_ops sparx5_phylink_mac_ops = {
+       .validate = sparx5_phylink_validate,
+       .mac_config = sparx5_phylink_mac_config,
+       .mac_link_down = sparx5_phylink_mac_link_down,
+       .mac_link_up = sparx5_phylink_mac_link_up,
+};
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
new file mode 100644 (file)
index 0000000..d2e3250
--- /dev/null
@@ -0,0 +1,1146 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+#include "sparx5_port.h"
+
+#define SPX5_ETYPE_TAG_C     0x8100
+#define SPX5_ETYPE_TAG_S     0x88a8
+
+#define SPX5_WAIT_US         1000
+#define SPX5_WAIT_MAX_US     2000
+
+enum port_error {
+       SPX5_PERR_SPEED,
+       SPX5_PERR_IFTYPE,
+};
+
+#define PAUSE_DISCARD        0xC
+#define ETH_MAXLEN           (ETH_DATA_LEN + ETH_HLEN + ETH_FCS_LEN)
+
+static void decode_sgmii_word(u16 lp_abil, struct sparx5_port_status *status)
+{
+       status->an_complete = true;
+       if (!(lp_abil & LPA_SGMII_LINK)) {
+               status->link = false;
+               return;
+       }
+
+       switch (lp_abil & LPA_SGMII_SPD_MASK) {
+       case LPA_SGMII_10:
+               status->speed = SPEED_10;
+               break;
+       case LPA_SGMII_100:
+               status->speed = SPEED_100;
+               break;
+       case LPA_SGMII_1000:
+               status->speed = SPEED_1000;
+               break;
+       default:
+               status->link = false;
+               return;
+       }
+       if (lp_abil & LPA_SGMII_FULL_DUPLEX)
+               status->duplex = DUPLEX_FULL;
+       else
+               status->duplex = DUPLEX_HALF;
+}
+
+static void decode_cl37_word(u16 lp_abil, u16 ld_abil, struct sparx5_port_status *status)
+{
+       status->link = !(lp_abil & ADVERTISE_RFAULT) && status->link;
+       status->an_complete = true;
+       status->duplex = (ADVERTISE_1000XFULL & lp_abil) ?
+               DUPLEX_FULL : DUPLEX_UNKNOWN; /* 1G HDX not supported */
+
+       if ((ld_abil & ADVERTISE_1000XPAUSE) &&
+           (lp_abil & ADVERTISE_1000XPAUSE)) {
+               status->pause = MLO_PAUSE_RX | MLO_PAUSE_TX;
+       } else if ((ld_abil & ADVERTISE_1000XPSE_ASYM) &&
+                  (lp_abil & ADVERTISE_1000XPSE_ASYM)) {
+               status->pause |= (lp_abil & ADVERTISE_1000XPAUSE) ?
+                       MLO_PAUSE_TX : 0;
+               status->pause |= (ld_abil & ADVERTISE_1000XPAUSE) ?
+                       MLO_PAUSE_RX : 0;
+       } else {
+               status->pause = MLO_PAUSE_NONE;
+       }
+}
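
For reference, a standalone sketch (not part of the patch) of the Clause 37
pause resolution above: symmetric pause applies when both sides advertise it;
otherwise asymmetric pause is resolved per direction from the two code words.
The bit values mirror the standard 1000BASE-X advertisement layout and the
sample words are made up.

#include <stdio.h>

#define XPAUSE 0x0080 /* ADVERTISE_1000XPAUSE */
#define XASYM  0x0100 /* ADVERTISE_1000XPSE_ASYM */

int main(void)
{
        unsigned short ld = XPAUSE | XASYM; /* local: symmetric + asym */
        unsigned short lp = XASYM;          /* peer: asymmetric only */
        int rx = 0, tx = 0;

        if ((ld & XPAUSE) && (lp & XPAUSE)) {
                rx = tx = 1;
        } else if ((ld & XASYM) && (lp & XASYM)) {
                tx = !!(lp & XPAUSE);
                rx = !!(ld & XPAUSE);
        }
        printf("rx pause: %d, tx pause: %d\n", rx, tx); /* rx 1, tx 0 */
        return 0;
}
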
+
+static int sparx5_get_dev2g5_status(struct sparx5 *sparx5,
+                                   struct sparx5_port *port,
+                                   struct sparx5_port_status *status)
+{
+       u32 portno = port->portno;
+       u16 lp_adv, ld_adv;
+       u32 value;
+
+       /* Get PCS Link down sticky */
+       value = spx5_rd(sparx5, DEV2G5_PCS1G_STICKY(portno));
+       status->link_down = DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(value);
+       if (status->link_down)  /* Clear the sticky */
+               spx5_wr(value, sparx5, DEV2G5_PCS1G_STICKY(portno));
+
+       /* Get both current Link and Sync status */
+       value = spx5_rd(sparx5, DEV2G5_PCS1G_LINK_STATUS(portno));
+       status->link = DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(value) &&
+                      DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(value);
+
+       if (port->conf.portmode == PHY_INTERFACE_MODE_1000BASEX)
+               status->speed = SPEED_1000;
+       else if (port->conf.portmode == PHY_INTERFACE_MODE_2500BASEX)
+               status->speed = SPEED_2500;
+
+       status->duplex = DUPLEX_FULL;
+
+       /* Get PCS ANEG status register */
+       value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_STATUS(portno));
+
+       /* Aneg complete provides more information */
+       if (DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(value)) {
+               lp_adv = DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(value);
+               if (port->conf.portmode == PHY_INTERFACE_MODE_SGMII) {
+                       decode_sgmii_word(lp_adv, status);
+               } else {
+                       value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_CFG(portno));
+                       ld_adv = DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(value);
+                       decode_cl37_word(lp_adv, ld_adv, status);
+               }
+       }
+       return 0;
+}
+
+static int sparx5_get_sfi_status(struct sparx5 *sparx5,
+                                struct sparx5_port *port,
+                                struct sparx5_port_status *status)
+{
+       bool high_speed_dev = sparx5_is_baser(port->conf.portmode);
+       u32 portno = port->portno;
+       u32 value, dev, tinst;
+       void __iomem *inst;
+
+       if (!high_speed_dev) {
+               netdev_err(port->ndev, "error: low speed and SFI mode\n");
+               return -EINVAL;
+       }
+
+       dev = sparx5_to_high_dev(portno);
+       tinst = sparx5_port_dev_index(portno);
+       inst = spx5_inst_get(sparx5, dev, tinst);
+
+       value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
+       if (value != DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY) {
+               /* The link is or has been down. Clear the sticky bit */
+               status->link_down = 1;
+               spx5_inst_wr(0xffffffff, inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
+               value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
+       }
+       status->link = (value == DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY);
+       status->duplex = DUPLEX_FULL;
+       if (port->conf.portmode == PHY_INTERFACE_MODE_5GBASER)
+               status->speed = SPEED_5000;
+       else if (port->conf.portmode == PHY_INTERFACE_MODE_10GBASER)
+               status->speed = SPEED_10000;
+       else
+               status->speed = SPEED_25000;
+
+       return 0;
+}
+
+/* Get link status of 1000Base-X/in-band and SFI ports.
+ */
+int sparx5_get_port_status(struct sparx5 *sparx5,
+                          struct sparx5_port *port,
+                          struct sparx5_port_status *status)
+{
+       memset(status, 0, sizeof(*status));
+       status->speed = port->conf.speed;
+       if (port->conf.power_down) {
+               status->link = false;
+               return 0;
+       }
+       switch (port->conf.portmode) {
+       case PHY_INTERFACE_MODE_SGMII:
+       case PHY_INTERFACE_MODE_QSGMII:
+       case PHY_INTERFACE_MODE_1000BASEX:
+       case PHY_INTERFACE_MODE_2500BASEX:
+               return sparx5_get_dev2g5_status(sparx5, port, status);
+       case PHY_INTERFACE_MODE_5GBASER:
+       case PHY_INTERFACE_MODE_10GBASER:
+       case PHY_INTERFACE_MODE_25GBASER:
+               return sparx5_get_sfi_status(sparx5, port, status);
+       case PHY_INTERFACE_MODE_NA:
+               return 0;
+       default:
+               netdev_err(port->ndev, "Status not supported");
+               return -ENODEV;
+       }
+       return 0;
+}
+
+static int sparx5_port_error(struct sparx5_port *port,
+                            struct sparx5_port_config *conf,
+                            enum port_error errtype)
+{
+       switch (errtype) {
+       case SPX5_PERR_SPEED:
+               netdev_err(port->ndev,
+                          "Interface does not support speed: %u: for %s\n",
+                          conf->speed, phy_modes(conf->portmode));
+               break;
+       case SPX5_PERR_IFTYPE:
+               netdev_err(port->ndev,
+                          "Switch port does not support interface type: %s\n",
+                          phy_modes(conf->portmode));
+               break;
+       default:
+               netdev_err(port->ndev,
+                          "Interface configuration error\n");
+       }
+
+       return -EINVAL;
+}
+
+static int sparx5_port_verify_speed(struct sparx5 *sparx5,
+                                   struct sparx5_port *port,
+                                   struct sparx5_port_config *conf)
+{
+       if ((sparx5_port_is_2g5(port->portno) &&
+            conf->speed > SPEED_2500) ||
+           (sparx5_port_is_5g(port->portno)  &&
+            conf->speed > SPEED_5000) ||
+           (sparx5_port_is_10g(port->portno) &&
+            conf->speed > SPEED_10000))
+               return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+
+       switch (conf->portmode) {
+       case PHY_INTERFACE_MODE_NA:
+               return -EINVAL;
+       case PHY_INTERFACE_MODE_1000BASEX:
+               if (conf->speed != SPEED_1000)
+                       return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+               if (sparx5_port_is_2g5(port->portno))
+                       return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
+               break;
+       case PHY_INTERFACE_MODE_2500BASEX:
+               if (conf->speed != SPEED_2500 ||
+                   sparx5_port_is_2g5(port->portno))
+                       return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+               break;
+       case PHY_INTERFACE_MODE_QSGMII:
+               if (port->portno > 47)
+                       return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
+               fallthrough;
+       case PHY_INTERFACE_MODE_SGMII:
+               if (conf->speed != SPEED_1000 &&
+                   conf->speed != SPEED_100 &&
+                   conf->speed != SPEED_10 &&
+                   conf->speed != SPEED_2500)
+                       return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+               break;
+       case PHY_INTERFACE_MODE_5GBASER:
+       case PHY_INTERFACE_MODE_10GBASER:
+       case PHY_INTERFACE_MODE_25GBASER:
+               if ((conf->speed != SPEED_5000 &&
+                    conf->speed != SPEED_10000 &&
+                    conf->speed != SPEED_25000))
+                       return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+               break;
+       default:
+               return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
+       }
+       return 0;
+}
+
+static bool sparx5_dev_change(struct sparx5 *sparx5,
+                             struct sparx5_port *port,
+                             struct sparx5_port_config *conf)
+{
+       return sparx5_is_baser(port->conf.portmode) ^
+               sparx5_is_baser(conf->portmode);
+}
+
+static int sparx5_port_flush_poll(struct sparx5 *sparx5, u32 portno)
+{
+       u32  value, resource, prio, delay_cnt = 0;
+       bool poll_src = true;
+       char *mem = "";
+
+       /* Resource == 0: Memory tracked per source (SRC-MEM)
+        * Resource == 1: Frame references tracked per source (SRC-REF)
+        * Resource == 2: Memory tracked per destination (DST-MEM)
+        * Resource == 3: Frame references tracked per destination. (DST-REF)
+        */
+       while (1) {
+               bool empty = true;
+
+               for (resource = 0; resource < (poll_src ? 2 : 1); resource++) {
+                       u32 base;
+
+                       base = (resource == 0 ? 2048 : 0) + SPX5_PRIOS * portno;
+                       for (prio = 0; prio < SPX5_PRIOS; prio++) {
+                               value = spx5_rd(sparx5,
+                                               QRES_RES_STAT(base + prio));
+                               if (value) {
+                                       mem = resource == 0 ?
+                                               "DST-MEM" : "SRC-MEM";
+                                       empty = false;
+                               }
+                       }
+               }
+
+               if (empty)
+                       break;
+
+               if (delay_cnt++ == 2000) {
+                       dev_err(sparx5->dev,
+                               "Flush timeout port %u. %s queue not empty\n",
+                               portno, mem);
+                       return -EINVAL;
+               }
+
+               usleep_range(SPX5_WAIT_US, SPX5_WAIT_MAX_US);
+       }
+       return 0;
+}
+
+static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, bool high_spd_dev)
+{
+       u32 tinst = high_spd_dev ?
+                   sparx5_port_dev_index(port->portno) : port->portno;
+       u32 dev = high_spd_dev ?
+                 sparx5_to_high_dev(port->portno) : TARGET_DEV2G5;
+       void __iomem *devinst = spx5_inst_get(sparx5, dev, tinst);
+       u32 spd = port->conf.speed;
+       u32 spd_prm;
+       int err;
+
+       if (high_spd_dev) {
+               /* 1: Reset the PCS Rx clock domain */
+               spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST,
+                             DEV10G_DEV_RST_CTRL_PCS_RX_RST,
+                             devinst,
+                             DEV10G_DEV_RST_CTRL(0));
+
+               /* 2: Disable MAC frame reception */
+               spx5_inst_rmw(0,
+                             DEV10G_MAC_ENA_CFG_RX_ENA,
+                             devinst,
+                             DEV10G_MAC_ENA_CFG(0));
+       } else {
+               /* 1: Reset the PCS Rx clock domain */
+               spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
+                             DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
+                             devinst,
+                             DEV2G5_DEV_RST_CTRL(0));
+               /* 2: Disable MAC frame reception */
+               spx5_inst_rmw(0,
+                             DEV2G5_MAC_ENA_CFG_RX_ENA,
+                             devinst,
+                             DEV2G5_MAC_ENA_CFG(0));
+       }
+       /* 3: Disable traffic being sent to or from switch port->portno */
+       spx5_rmw(0,
+                QFWD_SWITCH_PORT_MODE_PORT_ENA,
+                sparx5,
+                QFWD_SWITCH_PORT_MODE(port->portno));
+
+       /* 4: Disable dequeuing from the egress queues */
+       spx5_rmw(HSCH_PORT_MODE_DEQUEUE_DIS,
+                HSCH_PORT_MODE_DEQUEUE_DIS,
+                sparx5,
+                HSCH_PORT_MODE(port->portno));
+
+       /* 5: Disable Flowcontrol */
+       spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(0xFFF - 1),
+                QSYS_PAUSE_CFG_PAUSE_STOP,
+                sparx5,
+                QSYS_PAUSE_CFG(port->portno));
+
+       spd_prm = spd == SPEED_10 ? 1000 : spd == SPEED_100 ? 100 : 10;
+       /* 6: Wait while the last frame is exiting the queues */
+       usleep_range(8 * spd_prm, 10 * spd_prm);
+
+       /* 7: Flush the queues associated with port->portno */
+       spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
+                HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
+                HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
+                HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1),
+                HSCH_FLUSH_CTRL_FLUSH_PORT |
+                HSCH_FLUSH_CTRL_FLUSH_DST |
+                HSCH_FLUSH_CTRL_FLUSH_SRC |
+                HSCH_FLUSH_CTRL_FLUSH_ENA,
+                sparx5,
+                HSCH_FLUSH_CTRL);
+
+       /* 8: Enable dequeuing from the egress queues */
+       spx5_rmw(0,
+                HSCH_PORT_MODE_DEQUEUE_DIS,
+                sparx5,
+                HSCH_PORT_MODE(port->portno));
+
+       /* 9: Wait until flushing is complete */
+       err = sparx5_port_flush_poll(sparx5, port->portno);
+       if (err)
+               return err;
+
+       /* 10: Reset the MAC clock domain */
+       if (high_spd_dev) {
+               spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
+                             DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(1) |
+                             DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(1),
+                             DEV10G_DEV_RST_CTRL_PCS_TX_RST |
+                             DEV10G_DEV_RST_CTRL_MAC_RX_RST |
+                             DEV10G_DEV_RST_CTRL_MAC_TX_RST,
+                             devinst,
+                             DEV10G_DEV_RST_CTRL(0));
+
+       } else {
+               spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(3) |
+                             DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
+                             DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(1) |
+                             DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(1) |
+                             DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(1),
+                             DEV2G5_DEV_RST_CTRL_SPEED_SEL |
+                             DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
+                             DEV2G5_DEV_RST_CTRL_PCS_RX_RST |
+                             DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
+                             DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
+                             devinst,
+                             DEV2G5_DEV_RST_CTRL(0));
+       }
+       /* 11: Clear flushing */
+       spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
+                HSCH_FLUSH_CTRL_FLUSH_ENA_SET(0),
+                HSCH_FLUSH_CTRL_FLUSH_PORT |
+                HSCH_FLUSH_CTRL_FLUSH_ENA,
+                sparx5,
+                HSCH_FLUSH_CTRL);
+
+       if (high_spd_dev) {
+               u32 pcs = sparx5_to_pcs_dev(port->portno);
+               void __iomem *pcsinst = spx5_inst_get(sparx5, pcs, tinst);
+
+               /* 12: Disable 5G/10G/25G BaseR PCS */
+               spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(0),
+                             PCS10G_BR_PCS_CFG_PCS_ENA,
+                             pcsinst,
+                             PCS10G_BR_PCS_CFG(0));
+
+               if (sparx5_port_is_25g(port->portno))
+                       /* Disable 25G PCS */
+                       spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(0),
+                                DEV25G_PCS25G_CFG_PCS25G_ENA,
+                                sparx5,
+                                DEV25G_PCS25G_CFG(tinst));
+       } else {
+               /* 12: Disable 1G PCS */
+               spx5_rmw(DEV2G5_PCS1G_CFG_PCS_ENA_SET(0),
+                        DEV2G5_PCS1G_CFG_PCS_ENA,
+                        sparx5,
+                        DEV2G5_PCS1G_CFG(port->portno));
+       }
+
+       /* The port is now flushed and disabled */
+       return 0;
+}
+
+static int sparx5_port_fifo_sz(struct sparx5 *sparx5,
+                              u32 portno, u32 speed)
+{
+       u32 sys_clk = sparx5_clk_period(sparx5->coreclock);
+       const u32 taxi_dist[SPX5_PORTS_ALL] = {
+               6, 8, 10, 6, 8, 10, 6, 8, 10, 6, 8, 10,
+               4, 4, 4, 4,
+               11, 12, 13, 14, 15, 16, 17, 18,
+               11, 12, 13, 14, 15, 16, 17, 18,
+               11, 12, 13, 14, 15, 16, 17, 18,
+               11, 12, 13, 14, 15, 16, 17, 18,
+               4, 6, 8, 4, 6, 8, 6, 8,
+               2, 2, 2, 2, 2, 2, 2, 4, 2
+       };
+       u32 mac_per    = 6400, tmp1, tmp2, tmp3;
+       u32 fifo_width = 16;
+       u32 mac_width  = 8;
+       u32 addition   = 0;
+
+       switch (speed) {
+       case SPEED_25000:
+               return 0;
+       case SPEED_10000:
+               mac_per = 6400;
+               mac_width = 8;
+               addition = 1;
+               break;
+       case SPEED_5000:
+               mac_per = 12800;
+               mac_width = 8;
+               addition = 0;
+               break;
+       case SPEED_2500:
+               mac_per = 3200;
+               mac_width = 1;
+               addition = 0;
+               break;
+       case SPEED_1000:
+               mac_per =  8000;
+               mac_width = 1;
+               addition = 0;
+               break;
+       case SPEED_100:
+       case SPEED_10:
+               return 1;
+       default:
+               break;
+       }
+
+       tmp1 = 1000 * mac_width / fifo_width;
+       tmp2 = 3000 + ((12000 + 2 * taxi_dist[portno] * 1000)
+                      * sys_clk / mac_per);
+       tmp3 = tmp1 * tmp2 / 1000;
+       return (tmp3 + 2000 + 999) / 1000 + addition;
+}
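
For reference, a standalone worked example (not part of the patch) of the
FIFO watermark formula above, using made-up 10G parameters: a 1600 ps core
clock period, taxi distance 6, mac_per 6400, mac_width 8 and addition 1.

#include <stdio.h>

int main(void)
{
        unsigned int sys_clk = 1600, mac_per = 6400, taxi_dist = 6;
        unsigned int fifo_width = 16, mac_width = 8, addition = 1;
        unsigned int tmp1, tmp2, tmp3;

        tmp1 = 1000 * mac_width / fifo_width;             /* 500 */
        tmp2 = 3000 + ((12000 + 2 * taxi_dist * 1000)
                       * sys_clk / mac_per);              /* 9000 */
        tmp3 = tmp1 * tmp2 / 1000;                        /* 4500 */
        printf("stop wm: %u\n", (tmp3 + 2000 + 999) / 1000 + addition); /* 8 */
        return 0;
}
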
+
+/* Configure port muxing:
+ * QSGMII:     4x2G5 devices
+ */
+static int sparx5_port_mux_set(struct sparx5 *sparx5,
+                              struct sparx5_port *port,
+                              struct sparx5_port_config *conf)
+{
+       u32 portno = port->portno;
+       u32 inst;
+
+       if (port->conf.portmode == conf->portmode)
+               return 0; /* Nothing to do */
+
+       switch (conf->portmode) {
+       case PHY_INTERFACE_MODE_QSGMII: /* QSGMII: 4x2G5 devices. Mode Q'  */
+               inst = (portno - portno % 4) / 4;
+               spx5_rmw(BIT(inst),
+                        BIT(inst),
+                        sparx5,
+                        PORT_CONF_QSGMII_ENA);
+
+               if ((portno / 4 % 2) == 0) {
+                       /* Affects d0-d3,d8-d11..d40-d43 */
+                       spx5_rmw(PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(1) |
+                                PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(1) |
+                                PORT_CONF_USGMII_CFG_QUAD_MODE_SET(1),
+                                PORT_CONF_USGMII_CFG_BYPASS_SCRAM |
+                                PORT_CONF_USGMII_CFG_BYPASS_DESCRAM |
+                                PORT_CONF_USGMII_CFG_QUAD_MODE,
+                                sparx5,
+                                PORT_CONF_USGMII_CFG((portno / 8)));
+               }
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static int sparx5_port_max_tags_set(struct sparx5 *sparx5,
+                                   struct sparx5_port *port)
+{
+       enum sparx5_port_max_tags max_tags    = port->max_vlan_tags;
+       int tag_ct          = max_tags == SPX5_PORT_MAX_TAGS_ONE ? 1 :
+                             max_tags == SPX5_PORT_MAX_TAGS_TWO ? 2 : 0;
+       bool dtag           = max_tags == SPX5_PORT_MAX_TAGS_TWO;
+       enum sparx5_vlan_port_type vlan_type  = port->vlan_type;
+       bool dotag          = max_tags != SPX5_PORT_MAX_TAGS_NONE;
+       u32 dev             = sparx5_to_high_dev(port->portno);
+       u32 tinst           = sparx5_port_dev_index(port->portno);
+       void __iomem *inst  = spx5_inst_get(sparx5, dev, tinst);
+       u32 etype;
+
+       etype = (vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
+                port->custom_etype :
+                vlan_type == SPX5_VLAN_PORT_TYPE_C ?
+                SPX5_ETYPE_TAG_C : SPX5_ETYPE_TAG_S);
+
+       spx5_wr(DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(etype) |
+               DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
+               DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
+               DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
+               sparx5,
+               DEV2G5_MAC_TAGS_CFG(port->portno));
+
+       if (sparx5_port_is_2g5(port->portno))
+               return 0;
+
+       spx5_inst_rmw(DEV10G_MAC_TAGS_CFG_TAG_ID_SET(etype) |
+                     DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(dotag),
+                     DEV10G_MAC_TAGS_CFG_TAG_ID |
+                     DEV10G_MAC_TAGS_CFG_TAG_ENA,
+                     inst,
+                     DEV10G_MAC_TAGS_CFG(0, 0));
+
+       spx5_inst_rmw(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(tag_ct),
+                     DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS,
+                     inst,
+                     DEV10G_MAC_NUM_TAGS_CFG(0));
+
+       spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(dotag),
+                     DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK,
+                     inst,
+                     DEV10G_MAC_MAXLEN_CFG(0));
+       return 0;
+}
+
+static int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed)
+{
+       u32 clk_period_ps = 1600; /* 625 MHz for now */
+       u32 urg = 672000;
+
+       switch (speed) {
+       case SPEED_10:
+       case SPEED_100:
+       case SPEED_1000:
+               urg = 672000;
+               break;
+       case SPEED_2500:
+               urg = 270000;
+               break;
+       case SPEED_5000:
+               urg = 135000;
+               break;
+       case SPEED_10000:
+               urg = 67200;
+               break;
+       case SPEED_25000:
+               urg = 27000;
+               break;
+       }
+       return urg / clk_period_ps - 1;
+}
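
A minimal sketch (not part of the patch) of the forwarding urgency calculation
above, assuming the fixed 1600 ps (625 MHz) core clock period the driver
currently uses.

#include <stdio.h>

int main(void)
{
        unsigned int clk_period_ps = 1600;
        unsigned int urg_1g = 672000, urg_25g = 27000;

        printf("1G urgency: %u\n", urg_1g / clk_period_ps - 1);   /* 419 */
        printf("25G urgency: %u\n", urg_25g / clk_period_ps - 1); /* 15 */
        return 0;
}
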
+
+static u16 sparx5_wm_enc(u16 value)
+{
+       if (value >= 2048)
+               return 2048 + value / 16;
+
+       return value;
+}
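
For reference, a standalone sketch (not part of the patch) of the watermark
encoding above: values below 2048 are encoded 1:1; larger values switch to a
coarser unit of 16 cells, flagged by the 2048 offset. The sample values are
arbitrary.

#include <stdio.h>

static unsigned int wm_enc(unsigned int value)
{
        if (value >= 2048)
                return 2048 + value / 16;
        return value;
}

int main(void)
{
        printf("%u -> %u\n", 100u, wm_enc(100));   /* 100 */
        printf("%u -> %u\n", 4096u, wm_enc(4096)); /* 2048 + 256 = 2304 */
        return 0;
}
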
+
+static int sparx5_port_fc_setup(struct sparx5 *sparx5,
+                               struct sparx5_port *port,
+                               struct sparx5_port_config *conf)
+{
+       bool fc_obey = conf->pause & MLO_PAUSE_RX ? 1 : 0;
+       u32 pause_stop = 0xFFF - 1; /* FC gen disabled */
+
+       if (conf->pause & MLO_PAUSE_TX)
+               pause_stop = sparx5_wm_enc(4 * (ETH_MAXLEN /
+                                                SPX5_BUFFER_CELL_SZ));
+
+       /* Set HDX flowcontrol */
+       spx5_rmw(DSM_MAC_CFG_HDX_BACKPREASSURE_SET(conf->duplex == DUPLEX_HALF),
+                DSM_MAC_CFG_HDX_BACKPREASSURE,
+                sparx5,
+                DSM_MAC_CFG(port->portno));
+
+       /* Obey flowcontrol  */
+       spx5_rmw(DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(fc_obey),
+                DSM_RX_PAUSE_CFG_RX_PAUSE_EN,
+                sparx5,
+                DSM_RX_PAUSE_CFG(port->portno));
+
+       /* Disable forward pressure */
+       spx5_rmw(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(fc_obey),
+                QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS,
+                sparx5,
+                QSYS_FWD_PRESSURE(port->portno));
+
+       /* Generate pause frames */
+       spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop),
+                QSYS_PAUSE_CFG_PAUSE_STOP,
+                sparx5,
+                QSYS_PAUSE_CFG(port->portno));
+
+       return 0;
+}
+
+static u16 sparx5_get_aneg_word(struct sparx5_port_config *conf)
+{
+       if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX) /* cl-37 aneg */
+               return (conf->pause_adv | ADVERTISE_LPACK | ADVERTISE_1000XFULL);
+       else
+               return 1; /* Enable SGMII Aneg */
+}
+
+int sparx5_serdes_set(struct sparx5 *sparx5,
+                     struct sparx5_port *port,
+                     struct sparx5_port_config *conf)
+{
+       int portmode, err, speed = conf->speed;
+
+       if (conf->portmode == PHY_INTERFACE_MODE_QSGMII &&
+           ((port->portno % 4) != 0)) {
+               return 0;
+       }
+       if (sparx5_is_baser(conf->portmode)) {
+               if (conf->portmode == PHY_INTERFACE_MODE_25GBASER)
+                       speed = SPEED_25000;
+               else if (conf->portmode == PHY_INTERFACE_MODE_10GBASER)
+                       speed = SPEED_10000;
+               else
+                       speed = SPEED_5000;
+       }
+
+       err = phy_set_media(port->serdes, conf->media);
+       if (err)
+               return err;
+       if (speed > 0) {
+               err = phy_set_speed(port->serdes, speed);
+               if (err)
+                       return err;
+       }
+       if (conf->serdes_reset) {
+               err = phy_reset(port->serdes);
+               if (err)
+                       return err;
+       }
+
+       /* Configure SerDes with port parameters
+        * For BaseR, the serdes driver supports 10GBASE-R at speeds 5G/10G/25G
+        */
+       portmode = conf->portmode;
+       if (sparx5_is_baser(conf->portmode))
+               portmode = PHY_INTERFACE_MODE_10GBASER;
+       err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, portmode);
+       if (err)
+               return err;
+       conf->serdes_reset = false;
+       return err;
+}
+
+static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
+                                  struct sparx5_port *port,
+                                  struct sparx5_port_config *conf)
+{
+       bool sgmii = false, inband_aneg = false;
+       int err;
+
+       if (port->conf.inband) {
+               if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
+                   conf->portmode == PHY_INTERFACE_MODE_QSGMII)
+                       inband_aneg = true; /* Cisco-SGMII in-band-aneg */
+               else if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX &&
+                        conf->autoneg)
+                       inband_aneg = true; /* Clause-37 in-band-aneg */
+
+               err = sparx5_serdes_set(sparx5, port, conf);
+               if (err)
+                       return -EINVAL;
+       } else {
+               sgmii = true; /* PHY is connected to the MAC */
+       }
+
+       /* Choose SGMII or 1000BaseX/2500BaseX PCS mode */
+       spx5_rmw(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(sgmii),
+                DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA,
+                sparx5,
+                DEV2G5_PCS1G_MODE_CFG(port->portno));
+
+       /* Enable PCS */
+       spx5_wr(DEV2G5_PCS1G_CFG_PCS_ENA_SET(1),
+               sparx5,
+               DEV2G5_PCS1G_CFG(port->portno));
+
+       if (inband_aneg) {
+               u16 abil = sparx5_get_aneg_word(conf);
+
+               /* Enable in-band aneg */
+               spx5_wr(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(abil) |
+                       DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
+                       DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(1) |
+                       DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(1),
+                       sparx5,
+                       DEV2G5_PCS1G_ANEG_CFG(port->portno));
+       } else {
+               spx5_wr(0, sparx5, DEV2G5_PCS1G_ANEG_CFG(port->portno));
+       }
+
+       /* Take PCS out of reset */
+       spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(2) |
+                DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
+                DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(0),
+                DEV2G5_DEV_RST_CTRL_SPEED_SEL |
+                DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
+                DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
+                sparx5,
+                DEV2G5_DEV_RST_CTRL(port->portno));
+
+       return 0;
+}
+
+static int sparx5_port_pcs_high_set(struct sparx5 *sparx5,
+                                   struct sparx5_port *port,
+                                   struct sparx5_port_config *conf)
+{
+       u32 clk_spd = conf->portmode == PHY_INTERFACE_MODE_5GBASER ? 1 : 0;
+       u32 pix = sparx5_port_dev_index(port->portno);
+       u32 dev = sparx5_to_high_dev(port->portno);
+       u32 pcs = sparx5_to_pcs_dev(port->portno);
+       void __iomem *devinst;
+       void __iomem *pcsinst;
+       int err;
+
+       devinst = spx5_inst_get(sparx5, dev, pix);
+       pcsinst = spx5_inst_get(sparx5, pcs, pix);
+
+       /* SFI: No in-band aneg. Speeds 5G/10G/25G */
+       err = sparx5_serdes_set(sparx5, port, conf);
+       if (err)
+               return -EINVAL;
+       if (conf->portmode == PHY_INTERFACE_MODE_25GBASER) {
+               /* Enable PCS for 25G device, speed 25G */
+               spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(1),
+                        DEV25G_PCS25G_CFG_PCS25G_ENA,
+                        sparx5,
+                        DEV25G_PCS25G_CFG(pix));
+       } else {
+               /* Enable PCS for 5G/10G/25G devices, speed 5G/10G */
+               spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(1),
+                             PCS10G_BR_PCS_CFG_PCS_ENA,
+                             pcsinst,
+                             PCS10G_BR_PCS_CFG(0));
+       }
+
+       /* Enable 5G/10G/25G MAC module */
+       spx5_inst_wr(DEV10G_MAC_ENA_CFG_RX_ENA_SET(1) |
+                    DEV10G_MAC_ENA_CFG_TX_ENA_SET(1),
+                    devinst,
+                    DEV10G_MAC_ENA_CFG(0));
+
+       /* Take the device out of reset */
+       spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(0) |
+                     DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
+                     DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(0) |
+                     DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
+                     DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd),
+                     DEV10G_DEV_RST_CTRL_PCS_RX_RST |
+                     DEV10G_DEV_RST_CTRL_PCS_TX_RST |
+                     DEV10G_DEV_RST_CTRL_MAC_RX_RST |
+                     DEV10G_DEV_RST_CTRL_MAC_TX_RST |
+                     DEV10G_DEV_RST_CTRL_SPEED_SEL,
+                     devinst,
+                     DEV10G_DEV_RST_CTRL(0));
+
+       return 0;
+}
+
+/* Switch between 1G/2.5G and 5G/10G/25G devices */
+static void sparx5_dev_switch(struct sparx5 *sparx5, int port, bool hsd)
+{
+       int bt_indx = BIT(sparx5_port_dev_index(port));
+
+       if (sparx5_port_is_5g(port)) {
+               spx5_rmw(hsd ? 0 : bt_indx,
+                        bt_indx,
+                        sparx5,
+                        PORT_CONF_DEV5G_MODES);
+       } else if (sparx5_port_is_10g(port)) {
+               spx5_rmw(hsd ? 0 : bt_indx,
+                        bt_indx,
+                        sparx5,
+                        PORT_CONF_DEV10G_MODES);
+       } else if (sparx5_port_is_25g(port)) {
+               spx5_rmw(hsd ? 0 : bt_indx,
+                        bt_indx,
+                        sparx5,
+                        PORT_CONF_DEV25G_MODES);
+       }
+}
+
+/* Configure speed/duplex dependent registers */
+static int sparx5_port_config_low_set(struct sparx5 *sparx5,
+                                     struct sparx5_port *port,
+                                     struct sparx5_port_config *conf)
+{
+       u32 clk_spd, gig_mode, tx_gap, hdx_gap_1, hdx_gap_2;
+       bool fdx = conf->duplex == DUPLEX_FULL;
+       int spd = conf->speed;
+
+       clk_spd = spd == SPEED_10 ? 0 : spd == SPEED_100 ? 1 : 2;
+       gig_mode = spd == SPEED_1000 || spd == SPEED_2500;
+       tx_gap = spd == SPEED_1000 ? 4 : fdx ? 6 : 5;
+       hdx_gap_1 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 1 : 2;
+       hdx_gap_2 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 4 : 1;
+
+       /* GIG/FDX mode */
+       spx5_rmw(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(gig_mode) |
+                DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(fdx),
+                DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA |
+                DEV2G5_MAC_MODE_CFG_FDX_ENA,
+                sparx5,
+                DEV2G5_MAC_MODE_CFG(port->portno));
+
+       /* Set MAC IFG Gaps */
+       spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(tx_gap) |
+               DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(hdx_gap_1) |
+               DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(hdx_gap_2),
+               sparx5,
+               DEV2G5_MAC_IFG_CFG(port->portno));
+
+       /* Disable frame aging when in HDX (due to a HDX issue) */
+       spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(fdx == 0),
+                HSCH_PORT_MODE_AGE_DIS,
+                sparx5,
+                HSCH_PORT_MODE(port->portno));
+
+       /* Enable MAC module */
+       spx5_wr(DEV2G5_MAC_ENA_CFG_RX_ENA |
+               DEV2G5_MAC_ENA_CFG_TX_ENA,
+               sparx5,
+               DEV2G5_MAC_ENA_CFG(port->portno));
+
+       /* Select speed and take MAC out of reset */
+       spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd) |
+                DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
+                DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(0),
+                DEV2G5_DEV_RST_CTRL_SPEED_SEL |
+                DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
+                DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
+                sparx5,
+                DEV2G5_DEV_RST_CTRL(port->portno));
+
+       return 0;
+}
+
+int sparx5_port_pcs_set(struct sparx5 *sparx5,
+                       struct sparx5_port *port,
+                       struct sparx5_port_config *conf)
+{
+       bool high_speed_dev = sparx5_is_baser(conf->portmode);
+       int err;
+
+       if (sparx5_dev_change(sparx5, port, conf)) {
+               /* switch device */
+               sparx5_dev_switch(sparx5, port->portno, high_speed_dev);
+
+               /* Disable the not-in-use device */
+               err = sparx5_port_disable(sparx5, port, !high_speed_dev);
+               if (err)
+                       return err;
+       }
+       /* Disable the port before re-configuring */
+       err = sparx5_port_disable(sparx5, port, high_speed_dev);
+       if (err)
+               return -EINVAL;
+
+       if (high_speed_dev)
+               err = sparx5_port_pcs_high_set(sparx5, port, conf);
+       else
+               err = sparx5_port_pcs_low_set(sparx5, port, conf);
+
+       if (err)
+               return -EINVAL;
+
+       if (port->conf.inband) {
+               /* Enable/disable 1G counters in ASM */
+               spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
+                        ASM_PORT_CFG_CSC_STAT_DIS,
+                        sparx5,
+                        ASM_PORT_CFG(port->portno));
+
+               /* Enable/disable 1G counters in DSM */
+               spx5_rmw(DSM_BUF_CFG_CSC_STAT_DIS_SET(high_speed_dev),
+                        DSM_BUF_CFG_CSC_STAT_DIS,
+                        sparx5,
+                        DSM_BUF_CFG(port->portno));
+       }
+
+       port->conf = *conf;
+
+       return 0;
+}
+
+int sparx5_port_config(struct sparx5 *sparx5,
+                      struct sparx5_port *port,
+                      struct sparx5_port_config *conf)
+{
+       bool high_speed_dev = sparx5_is_baser(conf->portmode);
+       int err, urgency, stop_wm;
+
+       err = sparx5_port_verify_speed(sparx5, port, conf);
+       if (err)
+               return err;
+
+       /* high speed device is already configured */
+       if (!high_speed_dev)
+               sparx5_port_config_low_set(sparx5, port, conf);
+
+       /* Configure flow control */
+       err = sparx5_port_fc_setup(sparx5, port, conf);
+       if (err)
+               return err;
+
+       /* Set the DSM stop watermark */
+       stop_wm = sparx5_port_fifo_sz(sparx5, port->portno, conf->speed);
+       spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(stop_wm),
+                DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
+                sparx5,
+                DSM_DEV_TX_STOP_WM_CFG(port->portno));
+
+       /* Enable port in queue system */
+       urgency = sparx5_port_fwd_urg(sparx5, conf->speed);
+       spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
+                QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
+                QFWD_SWITCH_PORT_MODE_PORT_ENA |
+                QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
+                sparx5,
+                QFWD_SWITCH_PORT_MODE(port->portno));
+
+       /* Save the new values */
+       port->conf = *conf;
+
+       return 0;
+}
+
+/* Initialize port config to default */
+int sparx5_port_init(struct sparx5 *sparx5,
+                    struct sparx5_port *port,
+                    struct sparx5_port_config *conf)
+{
+       u32 pause_start = sparx5_wm_enc(6  * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
+       u32 atop = sparx5_wm_enc(20 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
+       u32 devhigh = sparx5_to_high_dev(port->portno);
+       u32 pix = sparx5_port_dev_index(port->portno);
+       u32 pcs = sparx5_to_pcs_dev(port->portno);
+       bool sd_pol = port->signd_active_high;
+       bool sd_sel = !port->signd_internal;
+       bool sd_ena = port->signd_enable;
+       u32 pause_stop = 0xFFF - 1; /* FC generate disabled */
+       void __iomem *devinst;
+       void __iomem *pcsinst;
+       int err;
+
+       devinst = spx5_inst_get(sparx5, devhigh, pix);
+       pcsinst = spx5_inst_get(sparx5, pcs, pix);
+
+       /* Set the port mux mode */
+       err = sparx5_port_mux_set(sparx5, port, conf);
+       if (err)
+               return err;
+
+       /* Configure MAC VLAN awareness via the max allowed VLAN tags */
+       err = sparx5_port_max_tags_set(sparx5, port);
+       if (err)
+               return err;
+
+       /* Set Max Length */
+       spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
+                DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
+                sparx5,
+                DEV2G5_MAC_MAXLEN_CFG(port->portno));
+
+       /* 1G/2G5: Signal Detect configuration */
+       spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
+               DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
+               DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
+               sparx5,
+               DEV2G5_PCS1G_SD_CFG(port->portno));
+
+       /* Set Pause WM hysteresis */
+       spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
+                QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
+                QSYS_PAUSE_CFG_PAUSE_ENA_SET(1),
+                QSYS_PAUSE_CFG_PAUSE_START |
+                QSYS_PAUSE_CFG_PAUSE_STOP |
+                QSYS_PAUSE_CFG_PAUSE_ENA,
+                sparx5,
+                QSYS_PAUSE_CFG(port->portno));
+
+       /* Port ATOP. Frames are tail dropped when this WM is hit */
+       spx5_wr(QSYS_ATOP_ATOP_SET(atop),
+               sparx5,
+               QSYS_ATOP(port->portno));
+
+       /* Discard pause frame 01-80-C2-00-00-01 */
+       spx5_wr(PAUSE_DISCARD, sparx5, ANA_CL_CAPTURE_BPDU_CFG(port->portno));
+
+       if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
+           conf->portmode == PHY_INTERFACE_MODE_SGMII) {
+               err = sparx5_serdes_set(sparx5, port, conf);
+               if (err)
+                       return err;
+
+               if (!sparx5_port_is_2g5(port->portno))
+                       /* Enable shadow device */
+                       spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
+                                DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
+                                sparx5,
+                                DSM_DEV_TX_STOP_WM_CFG(port->portno));
+
+               sparx5_dev_switch(sparx5, port->portno, false);
+       }
+       if (conf->portmode == PHY_INTERFACE_MODE_QSGMII) {
+               /* All ports must be PCS enabled in QSGMII mode */
+               spx5_rmw(DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0),
+                        DEV2G5_DEV_RST_CTRL_PCS_TX_RST,
+                        sparx5,
+                        DEV2G5_DEV_RST_CTRL(port->portno));
+       }
+       /* Default IFGs for 1G */
+       spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(6) |
+               DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(0) |
+               DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(0),
+               sparx5,
+               DEV2G5_MAC_IFG_CFG(port->portno));
+
+       if (sparx5_port_is_2g5(port->portno))
+               return 0; /* Low speed device only - return */
+
+       /* Now setup the high speed device */
+       if (conf->portmode == PHY_INTERFACE_MODE_NA)
+               conf->portmode = PHY_INTERFACE_MODE_10GBASER;
+
+       if (sparx5_is_baser(conf->portmode))
+               sparx5_dev_switch(sparx5, port->portno, true);
+
+       /* Set Max Length */
+       spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
+                     DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
+                     devinst,
+                     DEV10G_MAC_MAXLEN_CFG(0));
+
+       /* Handle Signal Detect in 10G PCS */
+       spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
+                    PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(sd_sel) |
+                    PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(sd_ena),
+                    pcsinst,
+                    PCS10G_BR_PCS_SD_CFG(0));
+
+       if (sparx5_port_is_25g(port->portno)) {
+               /* Handle Signal Detect in 25G PCS */
+               spx5_wr(DEV25G_PCS25G_SD_CFG_SD_POL_SET(sd_pol) |
+                       DEV25G_PCS25G_SD_CFG_SD_SEL_SET(sd_sel) |
+                       DEV25G_PCS25G_SD_CFG_SD_ENA_SET(sd_ena),
+                       sparx5,
+                       DEV25G_PCS25G_SD_CFG(pix));
+       }
+
+       return 0;
+}
+
+void sparx5_port_enable(struct sparx5_port *port, bool enable)
+{
+       struct sparx5 *sparx5 = port->sparx5;
+
+       /* Enable or disable the port for frame transfer */
+       spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(enable),
+                QFWD_SWITCH_PORT_MODE_PORT_ENA,
+                sparx5,
+                QFWD_SWITCH_PORT_MODE(port->portno));
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
new file mode 100644 (file)
index 0000000..fd05ab6
--- /dev/null
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __SPARX5_PORT_H__
+#define __SPARX5_PORT_H__
+
+#include "sparx5_main.h"
+
+static inline bool sparx5_port_is_2g5(int portno)
+{
+       return portno >= 16 && portno <= 47;
+}
+
+static inline bool sparx5_port_is_5g(int portno)
+{
+       return portno <= 11 || portno == 64;
+}
+
+static inline bool sparx5_port_is_10g(int portno)
+{
+       return (portno >= 12 && portno <= 15) || (portno >= 48 && portno <= 55);
+}
+
+static inline bool sparx5_port_is_25g(int portno)
+{
+       return portno >= 56 && portno <= 63;
+}
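+
+/* Port-to-device mapping implied by the range checks above:
+ * ports 0-11 and 64 are 5G, 12-15 and 48-55 are 10G,
+ * 16-47 are 2G5 and 56-63 are 25G.
+ */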
+
+static inline u32 sparx5_to_high_dev(int port)
+{
+       if (sparx5_port_is_5g(port))
+               return TARGET_DEV5G;
+       if (sparx5_port_is_10g(port))
+               return TARGET_DEV10G;
+       return TARGET_DEV25G;
+}
+
+static inline u32 sparx5_to_pcs_dev(int port)
+{
+       if (sparx5_port_is_5g(port))
+               return TARGET_PCS5G_BR;
+       if (sparx5_port_is_10g(port))
+               return TARGET_PCS10G_BR;
+       return TARGET_PCS25G_BR;
+}
+
+static inline int sparx5_port_dev_index(int port)
+{
+       if (sparx5_port_is_2g5(port))
+               return port;
+       if (sparx5_port_is_5g(port))
+               return (port <= 11 ? port : 12);
+       if (sparx5_port_is_10g(port))
+               return (port >= 12 && port <= 15) ?
+                       port - 12 : port - 44;
+       return (port - 56);
+}
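+
+/* Example: portno 50 is a 10G port, so sparx5_port_dev_index(50) is
+ * 50 - 44 = 6, selecting the seventh DEV10G/PCS10G instance, while
+ * portno 13 maps to 13 - 12 = 1.
+ */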
+
+int sparx5_port_init(struct sparx5 *sparx5,
+                    struct sparx5_port *spx5_port,
+                    struct sparx5_port_config *conf);
+
+int sparx5_port_config(struct sparx5 *sparx5,
+                      struct sparx5_port *spx5_port,
+                      struct sparx5_port_config *conf);
+
+int sparx5_port_pcs_set(struct sparx5 *sparx5,
+                       struct sparx5_port *port,
+                       struct sparx5_port_config *conf);
+
+int sparx5_serdes_set(struct sparx5 *sparx5,
+                     struct sparx5_port *spx5_port,
+                     struct sparx5_port_config *conf);
+
+struct sparx5_port_status {
+       bool link;
+       bool link_down;
+       int  speed;
+       bool an_complete;
+       int  duplex;
+       int  pause;
+};
+
+int sparx5_get_port_status(struct sparx5 *sparx5,
+                          struct sparx5_port *port,
+                          struct sparx5_port_status *status);
+
+void sparx5_port_enable(struct sparx5_port *port, bool enable);
+
+#endif /* __SPARX5_PORT_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
new file mode 100644 (file)
index 0000000..a72e3b3
--- /dev/null
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <linux/if_bridge.h>
+#include <net/switchdev.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+static struct workqueue_struct *sparx5_owq;
+
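+/* FDB add/del notifications arrive in atomic context, so the event is
+ * copied into this work item and handled later in process context on
+ * the ordered sparx5_owq workqueue.
+ */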
+struct sparx5_switchdev_event_work {
+       struct work_struct work;
+       struct switchdev_notifier_fdb_info fdb_info;
+       struct net_device *dev;
+       unsigned long event;
+};
+
+static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
+                                         struct switchdev_brport_flags flags)
+{
+       if (flags.mask & BR_MCAST_FLOOD)
+               sparx5_pgid_update_mask(port, PGID_MC_FLOOD, true);
+}
+
+static void sparx5_attr_stp_state_set(struct sparx5_port *port,
+                                     u8 state)
+{
+       struct sparx5 *sparx5 = port->sparx5;
+
+       if (!test_bit(port->portno, sparx5->bridge_mask)) {
+               netdev_err(port->ndev,
+                          "Controlling non-bridged port %d?\n", port->portno);
+               return;
+       }
+
+       switch (state) {
+       case BR_STATE_FORWARDING:
+               set_bit(port->portno, sparx5->bridge_fwd_mask);
+               fallthrough;
+       case BR_STATE_LEARNING:
+               set_bit(port->portno, sparx5->bridge_lrn_mask);
+               break;
+
+       default:
+               /* All other states treated as blocking */
+               clear_bit(port->portno, sparx5->bridge_fwd_mask);
+               clear_bit(port->portno, sparx5->bridge_lrn_mask);
+               break;
+       }
+
+       /* apply the bridge_fwd_mask to all the ports */
+       sparx5_update_fwd(sparx5);
+}
+
+static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
+                                       unsigned long ageing_clock_t)
+{
+       unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+       u32 ageing_time = jiffies_to_msecs(ageing_jiffies);
+
+       sparx5_set_ageing(port->sparx5, ageing_time);
+}
+
+static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
+                               const struct switchdev_attr *attr,
+                               struct netlink_ext_ack *extack)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+
+       switch (attr->id) {
+       case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+               sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
+               break;
+       case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+               sparx5_attr_stp_state_set(port, attr->u.stp_state);
+               break;
+       case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+               sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
+               break;
+       case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+               port->vlan_aware = attr->u.vlan_filtering;
+               sparx5_vlan_port_apply(port->sparx5, port);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static int sparx5_port_bridge_join(struct sparx5_port *port,
+                                  struct net_device *bridge)
+{
+       struct sparx5 *sparx5 = port->sparx5;
+
+       if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
+               /* First bridged port */
+               sparx5->hw_bridge_dev = bridge;
+       else if (sparx5->hw_bridge_dev != bridge)
+               /* Adding the port to a second bridge is unsupported */
+               return -ENODEV;
+
+       set_bit(port->portno, sparx5->bridge_mask);
+
+       /* The port now enters bridge mode, therefore multicast frames no
+        * longer need to be copied to the CPU unless the bridge requests them
+        */
+       __dev_mc_unsync(port->ndev, sparx5_mc_unsync);
+
+       return 0;
+}
+
+static void sparx5_port_bridge_leave(struct sparx5_port *port,
+                                    struct net_device *bridge)
+{
+       struct sparx5 *sparx5 = port->sparx5;
+
+       clear_bit(port->portno, sparx5->bridge_mask);
+       if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
+               sparx5->hw_bridge_dev = NULL;
+
+       /* Clear bridge vlan settings before updating the port settings */
+       port->vlan_aware = 0;
+       port->pvid = NULL_VID;
+       port->vid = NULL_VID;
+
+       /* The port returns to host mode, therefore restore the mc list */
+       __dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
+}
+
+static int sparx5_port_changeupper(struct net_device *dev,
+                                  struct netdev_notifier_changeupper_info *info)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+       int err = 0;
+
+       if (netif_is_bridge_master(info->upper_dev)) {
+               if (info->linking)
+                       err = sparx5_port_bridge_join(port, info->upper_dev);
+               else
+                       sparx5_port_bridge_leave(port, info->upper_dev);
+
+               sparx5_vlan_port_apply(port->sparx5, port);
+       }
+
+       return err;
+}
+
+static int sparx5_port_add_addr(struct net_device *dev, bool up)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+       struct sparx5 *sparx5 = port->sparx5;
+       u16 vid = port->pvid;
+
+       if (up)
+               sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid);
+       else
+               sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);
+
+       return 0;
+}
+
+static int sparx5_netdevice_port_event(struct net_device *dev,
+                                      struct notifier_block *nb,
+                                      unsigned long event, void *ptr)
+{
+       int err = 0;
+
+       if (!sparx5_netdevice_check(dev))
+               return 0;
+
+       switch (event) {
+       case NETDEV_CHANGEUPPER:
+               err = sparx5_port_changeupper(dev, ptr);
+               break;
+       case NETDEV_PRE_UP:
+               err = sparx5_port_add_addr(dev, true);
+               break;
+       case NETDEV_DOWN:
+               err = sparx5_port_add_addr(dev, false);
+               break;
+       }
+
+       return err;
+}
+
+static int sparx5_netdevice_event(struct notifier_block *nb,
+                                 unsigned long event, void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       int ret = 0;
+
+       ret = sparx5_netdevice_port_event(dev, nb, event, ptr);
+
+       return notifier_from_errno(ret);
+}
+
+static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
+{
+       struct sparx5_switchdev_event_work *switchdev_work =
+               container_of(work, struct sparx5_switchdev_event_work, work);
+       struct net_device *dev = switchdev_work->dev;
+       struct switchdev_notifier_fdb_info *fdb_info;
+       struct sparx5_port *port;
+       struct sparx5 *sparx5;
+
+       rtnl_lock();
+       if (!sparx5_netdevice_check(dev))
+               goto out;
+
+       port = netdev_priv(dev);
+       sparx5 = port->sparx5;
+
+       fdb_info = &switchdev_work->fdb_info;
+
+       switch (switchdev_work->event) {
+       case SWITCHDEV_FDB_ADD_TO_DEVICE:
+               if (!fdb_info->added_by_user)
+                       break;
+               sparx5_add_mact_entry(sparx5, port, fdb_info->addr,
+                                     fdb_info->vid);
+               break;
+       case SWITCHDEV_FDB_DEL_TO_DEVICE:
+               if (!fdb_info->added_by_user)
+                       break;
+               sparx5_del_mact_entry(sparx5, fdb_info->addr, fdb_info->vid);
+               break;
+       }
+
+out:
+       rtnl_unlock();
+       kfree(switchdev_work->fdb_info.addr);
+       kfree(switchdev_work);
+       dev_put(dev);
+}
+
+static void sparx5_schedule_work(struct work_struct *work)
+{
+       queue_work(sparx5_owq, work);
+}
+
+static int sparx5_switchdev_event(struct notifier_block *unused,
+                                 unsigned long event, void *ptr)
+{
+       struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+       struct sparx5_switchdev_event_work *switchdev_work;
+       struct switchdev_notifier_fdb_info *fdb_info;
+       struct switchdev_notifier_info *info = ptr;
+       int err;
+
+       switch (event) {
+       case SWITCHDEV_PORT_ATTR_SET:
+               err = switchdev_handle_port_attr_set(dev, ptr,
+                                                    sparx5_netdevice_check,
+                                                    sparx5_port_attr_set);
+               return notifier_from_errno(err);
+       case SWITCHDEV_FDB_ADD_TO_DEVICE:
+               fallthrough;
+       case SWITCHDEV_FDB_DEL_TO_DEVICE:
+               switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+               if (!switchdev_work)
+                       return NOTIFY_BAD;
+
+               switchdev_work->dev = dev;
+               switchdev_work->event = event;
+
+               fdb_info = container_of(info,
+                                       struct switchdev_notifier_fdb_info,
+                                       info);
+               INIT_WORK(&switchdev_work->work,
+                         sparx5_switchdev_bridge_fdb_event_work);
+               memcpy(&switchdev_work->fdb_info, ptr,
+                      sizeof(switchdev_work->fdb_info));
+               switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+               if (!switchdev_work->fdb_info.addr)
+                       goto err_addr_alloc;
+
+               ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+                               fdb_info->addr);
+               dev_hold(dev);
+
+               sparx5_schedule_work(&switchdev_work->work);
+               break;
+       }
+
+       return NOTIFY_DONE;
+err_addr_alloc:
+       kfree(switchdev_work);
+       return NOTIFY_BAD;
+}
+
+static void sparx5_sync_port_dev_addr(struct sparx5 *sparx5,
+                                     struct sparx5_port *port,
+                                     u16 vid, bool add)
+{
+       if (!port ||
+           !test_bit(port->portno, sparx5->bridge_mask))
+               return; /* Skip null/host interfaces */
+
+       /* Bridge connects to vid? */
+       if (add) {
+               /* Add the port MAC address on this VLAN */
+               sparx5_mact_learn(sparx5, PGID_CPU,
+                                 port->ndev->dev_addr, vid);
+       } else {
+               /* Control port addr visibility depending on
+                * port VLAN connectivity.
+                */
+               if (test_bit(port->portno, sparx5->vlan_mask[vid]))
+                       sparx5_mact_learn(sparx5, PGID_CPU,
+                                         port->ndev->dev_addr, vid);
+               else
+                       sparx5_mact_forget(sparx5,
+                                          port->ndev->dev_addr, vid);
+       }
+}
+
+static void sparx5_sync_bridge_dev_addr(struct net_device *dev,
+                                       struct sparx5 *sparx5,
+                                       u16 vid, bool add)
+{
+       int i;
+
+       /* First, handle the bridge addresses */
+       if (add) {
+               sparx5_mact_learn(sparx5, PGID_CPU, dev->dev_addr,
+                                 vid);
+               sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
+                                 vid);
+       } else {
+               sparx5_mact_forget(sparx5, dev->dev_addr, vid);
+               sparx5_mact_forget(sparx5, dev->broadcast, vid);
+       }
+
+       /* Now look at bridged ports */
+       for (i = 0; i < SPX5_PORTS; i++)
+               sparx5_sync_port_dev_addr(sparx5, sparx5->ports[i], vid, add);
+}
+
+static int sparx5_handle_port_vlan_add(struct net_device *dev,
+                                      struct notifier_block *nb,
+                                      const struct switchdev_obj_port_vlan *v)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+
+       if (netif_is_bridge_master(dev)) {
+               if (v->flags & BRIDGE_VLAN_INFO_BRENTRY) {
+                       struct sparx5 *sparx5 =
+                               container_of(nb, struct sparx5,
+                                            switchdev_blocking_nb);
+
+                       sparx5_sync_bridge_dev_addr(dev, sparx5, v->vid, true);
+               }
+               return 0;
+       }
+
+       if (!sparx5_netdevice_check(dev))
+               return -EOPNOTSUPP;
+
+       return sparx5_vlan_vid_add(port, v->vid,
+                                 v->flags & BRIDGE_VLAN_INFO_PVID,
+                                 v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+}
+
+static int sparx5_handle_port_obj_add(struct net_device *dev,
+                                     struct notifier_block *nb,
+                                     struct switchdev_notifier_port_obj_info *info)
+{
+       const struct switchdev_obj *obj = info->obj;
+       int err;
+
+       switch (obj->id) {
+       case SWITCHDEV_OBJ_ID_PORT_VLAN:
+               err = sparx5_handle_port_vlan_add(dev, nb,
+                                                 SWITCHDEV_OBJ_PORT_VLAN(obj));
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       info->handled = true;
+       return err;
+}
+
+static int sparx5_handle_port_vlan_del(struct net_device *dev,
+                                      struct notifier_block *nb,
+                                      u16 vid)
+{
+       struct sparx5_port *port = netdev_priv(dev);
+       int ret;
+
+       /* Master bridge? */
+       if (netif_is_bridge_master(dev)) {
+               struct sparx5 *sparx5 =
+                       container_of(nb, struct sparx5,
+                                    switchdev_blocking_nb);
+
+               sparx5_sync_bridge_dev_addr(dev, sparx5, vid, false);
+               return 0;
+       }
+
+       if (!sparx5_netdevice_check(dev))
+               return -EOPNOTSUPP;
+
+       ret = sparx5_vlan_vid_del(port, vid);
+       if (ret)
+               return ret;
+
+       /* Delete the port MAC address with the matching VLAN information */
+       sparx5_mact_forget(port->sparx5, port->ndev->dev_addr, vid);
+
+       return 0;
+}
+
+static int sparx5_handle_port_obj_del(struct net_device *dev,
+                                     struct notifier_block *nb,
+                                     struct switchdev_notifier_port_obj_info *info)
+{
+       const struct switchdev_obj *obj = info->obj;
+       int err;
+
+       switch (obj->id) {
+       case SWITCHDEV_OBJ_ID_PORT_VLAN:
+               err = sparx5_handle_port_vlan_del(dev, nb,
+                                                 SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       info->handled = true;
+       return err;
+}
+
+static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
+                                          unsigned long event,
+                                          void *ptr)
+{
+       struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+       int err;
+
+       switch (event) {
+       case SWITCHDEV_PORT_OBJ_ADD:
+               err = sparx5_handle_port_obj_add(dev, nb, ptr);
+               return notifier_from_errno(err);
+       case SWITCHDEV_PORT_OBJ_DEL:
+               err = sparx5_handle_port_obj_del(dev, nb, ptr);
+               return notifier_from_errno(err);
+       case SWITCHDEV_PORT_ATTR_SET:
+               err = switchdev_handle_port_attr_set(dev, ptr,
+                                                    sparx5_netdevice_check,
+                                                    sparx5_port_attr_set);
+               return notifier_from_errno(err);
+       }
+
+       return NOTIFY_DONE;
+}
+
+int sparx5_register_notifier_blocks(struct sparx5 *s5)
+{
+       int err;
+
+       s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
+       err = register_netdevice_notifier(&s5->netdevice_nb);
+       if (err)
+               return err;
+
+       s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
+       err = register_switchdev_notifier(&s5->switchdev_nb);
+       if (err)
+               goto err_switchdev_nb;
+
+       s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
+       err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
+       if (err)
+               goto err_switchdev_blocking_nb;
+
+       sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
+       if (!sparx5_owq) {
+               err = -ENOMEM;
+               goto err_switchdev_blocking_nb;
+       }
+
+       return 0;
+
+err_switchdev_blocking_nb:
+       unregister_switchdev_notifier(&s5->switchdev_nb);
+err_switchdev_nb:
+       unregister_netdevice_notifier(&s5->netdevice_nb);
+
+       return err;
+}
+
+void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
+{
+       destroy_workqueue(sparx5_owq);
+
+       unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
+       unregister_switchdev_notifier(&s5->switchdev_nb);
+       unregister_netdevice_notifier(&s5->netdevice_nb);
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
new file mode 100644 (file)
index 0000000..4ce490a
--- /dev/null
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+static int sparx5_vlant_set_mask(struct sparx5 *sparx5, u16 vid)
+{
+       u32 mask[3];
+
+       /* Divide up mask in 32 bit words */
+       bitmap_to_arr32(mask, sparx5->vlan_mask[vid], SPX5_PORTS);
+
+       /* Output mask to respective registers */
+       spx5_wr(mask[0], sparx5, ANA_L3_VLAN_MASK_CFG(vid));
+       spx5_wr(mask[1], sparx5, ANA_L3_VLAN_MASK_CFG1(vid));
+       spx5_wr(mask[2], sparx5, ANA_L3_VLAN_MASK_CFG2(vid));
+
+       return 0;
+}
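+
+/* Assuming SPX5_PORTS covers ports 0-64, bitmap_to_arr32() places port
+ * bits 0-31 in mask[0] (VLAN_MASK_CFG), 32-63 in mask[1]
+ * (VLAN_MASK_CFG1) and port 64 in bit 0 of mask[2] (VLAN_MASK_CFG2).
+ */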
+
+void sparx5_vlan_init(struct sparx5 *sparx5)
+{
+       u16 vid;
+
+       spx5_rmw(ANA_L3_VLAN_CTRL_VLAN_ENA_SET(1),
+                ANA_L3_VLAN_CTRL_VLAN_ENA,
+                sparx5,
+                ANA_L3_VLAN_CTRL);
+
+       /* Map VLAN = FID */
+       for (vid = NULL_VID; vid < VLAN_N_VID; vid++)
+               spx5_rmw(ANA_L3_VLAN_CFG_VLAN_FID_SET(vid),
+                        ANA_L3_VLAN_CFG_VLAN_FID,
+                        sparx5,
+                        ANA_L3_VLAN_CFG(vid));
+}
+
+void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno)
+{
+       struct sparx5_port *port = sparx5->ports[portno];
+
+       /* Configure PVID */
+       spx5_rmw(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(0) |
+                ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid),
+                ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA |
+                ANA_CL_VLAN_CTRL_PORT_VID,
+                sparx5,
+                ANA_CL_VLAN_CTRL(port->portno));
+}
+
+int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
+                       bool untagged)
+{
+       struct sparx5 *sparx5 = port->sparx5;
+       int ret;
+
+       /* Make the port a member of the VLAN */
+       set_bit(port->portno, sparx5->vlan_mask[vid]);
+       ret = sparx5_vlant_set_mask(sparx5, vid);
+       if (ret)
+               return ret;
+
+       /* Default ingress vlan classification */
+       if (pvid)
+               port->pvid = vid;
+
+       /* Untagged egress vlan classification */
+       if (untagged && port->vid != vid) {
+               if (port->vid) {
+                       netdev_err(port->ndev,
+                                  "Port already has a native VLAN: %d\n",
+                                  port->vid);
+                       return -EBUSY;
+               }
+               port->vid = vid;
+       }
+
+       sparx5_vlan_port_apply(sparx5, port);
+
+       return 0;
+}
+
+int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid)
+{
+       struct sparx5 *sparx5 = port->sparx5;
+       int ret;
+
+       /* 8021q removes VID 0 on module unload for all interfaces
+        * with the VLAN filtering feature. We need to keep it to
+        * receive untagged traffic.
+        */
+       if (vid == 0)
+               return 0;
+
+       /* Stop the port from being a member of the vlan */
+       clear_bit(port->portno, sparx5->vlan_mask[vid]);
+       ret = sparx5_vlant_set_mask(sparx5, vid);
+       if (ret)
+               return ret;
+
+       /* Ingress */
+       if (port->pvid == vid)
+               port->pvid = 0;
+
+       /* Egress */
+       if (port->vid == vid)
+               port->vid = 0;
+
+       sparx5_vlan_port_apply(sparx5, port);
+
+       return 0;
+}
+
+void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable)
+{
+       struct sparx5 *sparx5 = port->sparx5;
+       u32 val, mask;
+
+       /* mask is spread across 3 registers x 32 bit */
+       if (port->portno < 32) {
+               mask = BIT(port->portno);
+               val = enable ? mask : 0;
+               spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG(pgid));
+       } else if (port->portno < 64) {
+               mask = BIT(port->portno - 32);
+               val = enable ? mask : 0;
+               spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG1(pgid));
+       } else if (port->portno < SPX5_PORTS) {
+               mask = BIT(port->portno - 64);
+               val = enable ? mask : 0;
+               spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG2(pgid));
+       } else {
+               netdev_err(port->ndev, "Invalid port no: %d\n", port->portno);
+       }
+}
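+
+/* Example: enabling portno 40 in a PGID sets bit 40 - 32 = 8 in
+ * ANA_AC_PGID_CFG1(pgid), leaving the CFG and CFG2 words untouched.
+ */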
+
+void sparx5_update_fwd(struct sparx5 *sparx5)
+{
+       DECLARE_BITMAP(workmask, SPX5_PORTS);
+       u32 mask[3];
+       int port;
+
+       /* Divide up fwd mask in 32 bit words */
+       bitmap_to_arr32(mask, sparx5->bridge_fwd_mask, SPX5_PORTS);
+
+       /* Update flood masks */
+       for (port = PGID_UC_FLOOD; port <= PGID_BCAST; port++) {
+               spx5_wr(mask[0], sparx5, ANA_AC_PGID_CFG(port));
+               spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port));
+               spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port));
+       }
+
+       /* Update SRC masks */
+       for (port = 0; port < SPX5_PORTS; port++) {
+               if (test_bit(port, sparx5->bridge_fwd_mask)) {
+                       /* Allow sending to all bridged ports except self */
+                       bitmap_copy(workmask, sparx5->bridge_fwd_mask, SPX5_PORTS);
+                       clear_bit(port, workmask);
+                       bitmap_to_arr32(mask, workmask, SPX5_PORTS);
+                       spx5_wr(mask[0], sparx5, ANA_AC_SRC_CFG(port));
+                       spx5_wr(mask[1], sparx5, ANA_AC_SRC_CFG1(port));
+                       spx5_wr(mask[2], sparx5, ANA_AC_SRC_CFG2(port));
+               } else {
+                       spx5_wr(0, sparx5, ANA_AC_SRC_CFG(port));
+                       spx5_wr(0, sparx5, ANA_AC_SRC_CFG1(port));
+                       spx5_wr(0, sparx5, ANA_AC_SRC_CFG2(port));
+               }
+       }
+
+       /* Learning enabled only for bridged ports */
+       bitmap_and(workmask, sparx5->bridge_fwd_mask,
+                  sparx5->bridge_lrn_mask, SPX5_PORTS);
+       bitmap_to_arr32(mask, workmask, SPX5_PORTS);
+
+       /* Apply learning mask */
+       spx5_wr(mask[0], sparx5, ANA_L2_AUTO_LRN_CFG);
+       spx5_wr(mask[1], sparx5, ANA_L2_AUTO_LRN_CFG1);
+       spx5_wr(mask[2], sparx5, ANA_L2_AUTO_LRN_CFG2);
+}
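+
+/* Example: with ports 1, 2 and 3 bridged and forwarding, the SRC mask
+ * written for port 2 is BIT(1) | BIT(3), i.e. every bridged port except
+ * itself, so frames are never forwarded back onto their ingress port.
+ */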
+
+void sparx5_vlan_port_apply(struct sparx5 *sparx5,
+                           struct sparx5_port *port)
+{
+       u32 val;
+
+       /* Configure PVID, vlan aware */
+       val = ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(port->vlan_aware) |
+               ANA_CL_VLAN_CTRL_VLAN_POP_CNT_SET(port->vlan_aware) |
+               ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid);
+       spx5_wr(val, sparx5, ANA_CL_VLAN_CTRL(port->portno));
+
+       val = 0;
+       if (port->vlan_aware && !port->pvid)
+               /* If the port is VLAN-aware and has no PVID, drop
+                * untagged and priority-tagged frames.
+                */
+               val = ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(1) |
+                       ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_SET(1) |
+                       ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_SET(1);
+       spx5_wr(val, sparx5,
+               ANA_CL_VLAN_FILTER_CTRL(port->portno, 0));
+
+       /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q */
+       val = REW_TAG_CTRL_TAG_TPID_CFG_SET(0);
+       if (port->vlan_aware) {
+               if (port->vid)
+                       /* Tag all frames except when VID == DEFAULT_VLAN */
+                       val |= REW_TAG_CTRL_TAG_CFG_SET(1);
+               else
+                       val |= REW_TAG_CTRL_TAG_CFG_SET(3);
+       }
+       spx5_wr(val, sparx5, REW_TAG_CTRL(port->portno));
+
+       /* Egress VID */
+       spx5_rmw(REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid),
+                REW_PORT_VLAN_CFG_PORT_VID,
+                sparx5,
+                REW_PORT_VLAN_CFG(port->portno));
+}
index 0c42833..adfb978 100644 (file)
@@ -379,6 +379,7 @@ static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
 
 int ocelot_port_flush(struct ocelot *ocelot, int port)
 {
+       unsigned int pause_ena;
        int err, val;
 
        /* Disable dequeuing from the egress queues */
@@ -387,6 +388,7 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
                       QSYS_PORT_MODE, port);
 
        /* Disable flow control */
+       ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena);
        ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
 
        /* Disable priority flow control */
@@ -422,6 +424,9 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
        /* Clear flushing again. */
        ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
 
+       /* Re-enable flow control */
+       ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena);
+
        return err;
 }
 EXPORT_SYMBOL(ocelot_port_flush);
index aad33d2..3e89e34 100644 (file)
@@ -939,7 +939,7 @@ static void ocelot_port_attr_mc_set(struct ocelot *ocelot, int port, bool mc)
                       ANA_PORT_CPU_FWD_CFG, port);
 }
 
-static int ocelot_port_attr_set(struct net_device *dev,
+static int ocelot_port_attr_set(struct net_device *dev, const void *ctx,
                                const struct switchdev_attr *attr,
                                struct netlink_ext_ack *extack)
 {
@@ -948,6 +948,9 @@ static int ocelot_port_attr_set(struct net_device *dev,
        int port = priv->chip_port;
        int err = 0;
 
+       if (ctx && ctx != priv)
+               return 0;
+
        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
                ocelot_port_attr_stp_state_set(ocelot, port, attr->u.stp_state);
@@ -1058,12 +1061,16 @@ ocelot_port_obj_mrp_del_ring_role(struct net_device *dev,
        return ocelot_mrp_del_ring_role(ocelot, port, mrp);
 }
 
-static int ocelot_port_obj_add(struct net_device *dev,
+static int ocelot_port_obj_add(struct net_device *dev, const void *ctx,
                               const struct switchdev_obj *obj,
                               struct netlink_ext_ack *extack)
 {
+       struct ocelot_port_private *priv = netdev_priv(dev);
        int ret = 0;
 
+       if (ctx && ctx != priv)
+               return 0;
+
        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                ret = ocelot_port_obj_add_vlan(dev,
@@ -1086,11 +1093,15 @@ static int ocelot_port_obj_add(struct net_device *dev,
        return ret;
 }
 
-static int ocelot_port_obj_del(struct net_device *dev,
+static int ocelot_port_obj_del(struct net_device *dev, const void *ctx,
                               const struct switchdev_obj *obj)
 {
+       struct ocelot_port_private *priv = netdev_priv(dev);
        int ret = 0;
 
+       if (ctx && ctx != priv)
+               return 0;
+
        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                ret = ocelot_vlan_vid_del(dev,
@@ -1143,10 +1154,14 @@ static int ocelot_switchdev_sync(struct ocelot *ocelot, int port,
                                 struct net_device *bridge_dev,
                                 struct netlink_ext_ack *extack)
 {
+       struct ocelot_port *ocelot_port = ocelot->ports[port];
+       struct ocelot_port_private *priv;
        clock_t ageing_time;
        u8 stp_state;
        int err;
 
+       priv = container_of(ocelot_port, struct ocelot_port_private, port);
+
        ocelot_inherit_brport_flags(ocelot, port, brport_dev);
 
        stp_state = br_port_get_stp_state(brport_dev);
@@ -1160,16 +1175,12 @@ static int ocelot_switchdev_sync(struct ocelot *ocelot, int port,
        ageing_time = br_get_ageing_time(bridge_dev);
        ocelot_port_attr_ageing_set(ocelot, port, ageing_time);
 
-       err = br_mdb_replay(bridge_dev, brport_dev,
+       err = br_mdb_replay(bridge_dev, brport_dev, priv, true,
                            &ocelot_switchdev_blocking_nb, extack);
        if (err && err != -EOPNOTSUPP)
                return err;
 
-       err = br_fdb_replay(bridge_dev, brport_dev, &ocelot_switchdev_nb);
-       if (err)
-               return err;
-
-       err = br_vlan_replay(bridge_dev, brport_dev,
+       err = br_vlan_replay(bridge_dev, brport_dev, priv, true,
                             &ocelot_switchdev_blocking_nb, extack);
        if (err && err != -EOPNOTSUPP)
                return err;
index 0528b8f..82eef4c 100644 (file)
@@ -3678,10 +3678,9 @@ static int vxge_config_vpaths(struct vxge_hw_device_config *device_config,
                        driver_config->vpath_per_dev = 1;
 
                for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
-                       if (!vxge_bVALn(vpath_mask, i, 1))
-                               continue;
-                       else
+                       if (vxge_bVALn(vpath_mask, i, 1))
                                default_no_vpath++;
+
                if (default_no_vpath < driver_config->vpath_per_dev)
                        driver_config->vpath_per_dev = default_no_vpath;
 
index 9ea77bb..273d529 100644 (file)
@@ -1067,6 +1067,8 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl
                                                    nfp_ct_map_params);
                if (!ct_map_ent) {
                        ct_entry = nfp_fl_ct_add_flow(zt, NULL, flow, true, extack);
+                       if (IS_ERR(ct_entry))
+                               return PTR_ERR(ct_entry);
                        ct_entry->type = CT_TYPE_NFT;
                        list_add(&ct_entry->list_node, &zt->nft_flows_list);
                        zt->nft_flows_count++;
index 7e6bac8..344ea11 100644 (file)
@@ -1602,6 +1602,8 @@ err_out_free_netdev:
        free_netdev(netdev);
 
 err_out_free_res:
+       if (NX_IS_REVISION_P3(pdev->revision))
+               pci_disable_pcie_error_reporting(pdev);
        pci_release_regions(pdev);
 
 err_out_disable_pdev:
index 17d5b64..e81dd34 100644 (file)
@@ -1266,9 +1266,11 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
                p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
 
        p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+       BUILD_BUG_ON(sizeof(dcbx_info->operational.params) !=
+                    sizeof(p_hwfn->p_dcbx_info->set.config.params));
        memcpy(&p_hwfn->p_dcbx_info->set.config.params,
               &dcbx_info->operational.params,
-              sizeof(struct qed_dcbx_admin_params));
+              sizeof(p_hwfn->p_dcbx_info->set.config.params));
        p_hwfn->p_dcbx_info->set.config.valid = true;
 
        memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
index 8a31ce2..b307264 100644 (file)
@@ -2688,6 +2688,7 @@ err_out_free_hw_res:
        kfree(ahw);
 
 err_out_free_res:
+       pci_disable_pcie_error_reporting(pdev);
        pci_release_regions(pdev);
 
 err_out_disable_pdev:
@@ -3341,9 +3342,6 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
        do {
                msleep(1000);
                prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
-
-               if (prev_state == QLCNIC_DEV_QUISCENT)
-                       continue;
        } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
 
        if (!dev_init_timeo) {
index 3ee5c1a..3676976 100644 (file)
@@ -168,7 +168,7 @@ static void rmnet_map_complement_ipv4_txporthdr_csum_field(struct iphdr *ip4h)
        void *txphdr;
        u16 *csum;
 
-       txphdr = ip4h + ip4h->ihl * 4;
+       txphdr = (void *)ip4h + ip4h->ihl * 4;
 
        if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
                csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
@@ -203,7 +203,7 @@ rmnet_map_complement_ipv6_txporthdr_csum_field(struct ipv6hdr *ip6h)
        void *txphdr;
        u16 *csum;
 
-       txphdr = ip6h + sizeof(struct ipv6hdr);
+       txphdr = ip6h + 1;
 
        if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
                csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
index 6556b53..13d8eb4 100644 (file)
@@ -126,24 +126,24 @@ static void rmnet_get_stats64(struct net_device *dev,
                              struct rtnl_link_stats64 *s)
 {
        struct rmnet_priv *priv = netdev_priv(dev);
-       struct rmnet_vnd_stats total_stats;
+       struct rmnet_vnd_stats total_stats = { };
        struct rmnet_pcpu_stats *pcpu_ptr;
+       struct rmnet_vnd_stats snapshot;
        unsigned int cpu, start;
 
-       memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
-
        for_each_possible_cpu(cpu) {
                pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
 
                do {
                        start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
-                       total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
-                       total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
-                       total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
-                       total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
+                       snapshot = pcpu_ptr->stats;     /* struct assignment */
                } while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
 
-               total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
+               total_stats.rx_pkts += snapshot.rx_pkts;
+               total_stats.rx_bytes += snapshot.rx_bytes;
+               total_stats.tx_pkts += snapshot.tx_pkts;
+               total_stats.tx_bytes += snapshot.tx_bytes;
+               total_stats.tx_drops += snapshot.tx_drops;
        }
 
        s->rx_packets = total_stats.rx_pkts;
@@ -356,4 +356,4 @@ int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
        }
 
        return 0;
-}
\ No newline at end of file
+}
index 6a9fe9f..f744557 100644 (file)
@@ -1669,7 +1669,7 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
        switch(stringset) {
        case ETH_SS_STATS:
-               memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
+               memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings));
                break;
        }
 }
index 177523b..8404786 100644 (file)
@@ -2287,7 +2287,7 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
 {
        switch (stringset) {
        case ETH_SS_STATS:
-               memcpy(data, *sh_eth_gstrings_stats,
+               memcpy(data, sh_eth_gstrings_stats,
                       sizeof(sh_eth_gstrings_stats));
                break;
        }
index 9a19e4d..ac3c248 100644 (file)
@@ -250,6 +250,15 @@ config DWMAC_INTEL
          This selects the Intel platform specific bus support for the
          stmmac driver. This driver is used for Intel Quark/EHL/TGL.
 
+config DWMAC_LOONGSON
+       tristate "Loongson PCI DWMAC support"
+       default MACH_LOONGSON64
+       depends on STMMAC_ETH && PCI
+       depends on COMMON_CLK
+       help
+         This selects the Loongson PCI bus support for the stmmac driver,
+         used for the Ethernet controller on the Loongson-2K1000 SoC and
+         the LS7A1000 bridge.
+
 config STMMAC_PCI
        tristate "STMMAC PCI bus support"
        depends on STMMAC_ETH && PCI
index 6471f93..d4e12e9 100644 (file)
@@ -37,4 +37,5 @@ dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o
 
 obj-$(CONFIG_STMMAC_PCI)       += stmmac-pci.o
 obj-$(CONFIG_DWMAC_INTEL)      += dwmac-intel.o
+obj-$(CONFIG_DWMAC_LOONGSON)   += dwmac-loongson.o
 stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
new file mode 100644 (file)
index 0000000..e108b0d
--- /dev/null
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Loongson Corporation
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/pci.h>
+#include <linux/dmi.h>
+#include <linux/device.h>
+#include <linux/of_irq.h>
+#include "stmmac.h"
+
+static int loongson_default_data(struct plat_stmmacenet_data *plat)
+{
+       plat->clk_csr = 2;      /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+       plat->has_gmac = 1;
+       plat->force_sf_dma_mode = 1;
+
+       /* Set default value for multicast hash bins */
+       plat->multicast_filter_bins = 256;
+
+       /* Set default value for unicast filter entries */
+       plat->unicast_filter_entries = 1;
+
+       /* Set the maxmtu to a default of JUMBO_LEN */
+       plat->maxmtu = JUMBO_LEN;
+
+       /* Set default number of RX and TX queues to use */
+       plat->tx_queues_to_use = 1;
+       plat->rx_queues_to_use = 1;
+
+       /* Disable Priority config by default */
+       plat->tx_queues_cfg[0].use_prio = false;
+       plat->rx_queues_cfg[0].use_prio = false;
+
+       /* Disable RX queues routing by default */
+       plat->rx_queues_cfg[0].pkt_route = 0x0;
+
+       /* Default to phy auto-detection */
+       plat->phy_addr = -1;
+
+       plat->dma_cfg->pbl = 32;
+       plat->dma_cfg->pblx8 = true;
+
+       return 0;
+}
+
+static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct plat_stmmacenet_data *plat;
+       struct stmmac_resources res;
+       bool mdio = false;
+       int ret, i;
+       struct device_node *np;
+
+       np = dev_of_node(&pdev->dev);
+
+       if (!np) {
+               pr_info("dwmac_loongson_pci: No OF node\n");
+               return -ENODEV;
+       }
+
+       if (!of_device_is_compatible(np, "loongson, pci-gmac")) {
+               pr_info("dwmac_loongson_pci: Incompatible OF node\n");
+               return -ENODEV;
+       }
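+
+       /* A matching node would look like this (hypothetical dts sketch):
+        *   gmac@3,0 {
+        *           compatible = "loongson, pci-gmac";
+        *           interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
+        *   };
+        */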
+
+       plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+       if (!plat)
+               return -ENOMEM;
+
+       plat->mdio_node = of_get_child_by_name(np, "mdio");
+       if (plat->mdio_node) {
+               dev_info(&pdev->dev, "Found MDIO subnode\n");
+               mdio = true;
+       }
+
+       if (mdio) {
+               plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+                                                  sizeof(*plat->mdio_bus_data),
+                                                  GFP_KERNEL);
+               if (!plat->mdio_bus_data)
+                       return -ENOMEM;
+               plat->mdio_bus_data->needs_reset = true;
+       }
+
+       plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL);
+       if (!plat->dma_cfg)
+               return -ENOMEM;
+
+       /* Enable pci device */
+       ret = pci_enable_device(pdev);
+       if (ret) {
+               dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__);
+               return ret;
+       }
+
+       /* Get the base address of device */
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+               if (pci_resource_len(pdev, i) == 0)
+                       continue;
+               ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+               if (ret)
+                       return ret;
+               break;
+       }
+
+       plat->bus_id = of_alias_get_id(np, "ethernet");
+       if (plat->bus_id < 0)
+               plat->bus_id = pci_dev_id(pdev);
+
+       plat->phy_interface = device_get_phy_mode(&pdev->dev);
+       if (plat->phy_interface < 0)
+               dev_err(&pdev->dev, "phy_mode not found\n");
+
+       plat->interface = PHY_INTERFACE_MODE_GMII;
+
+       pci_set_master(pdev);
+
+       loongson_default_data(plat);
+       pci_enable_msi(pdev);
+       memset(&res, 0, sizeof(res));
+       res.addr = pcim_iomap_table(pdev)[0];
+
+       res.irq = of_irq_get_byname(np, "macirq");
+       if (res.irq < 0) {
+               dev_err(&pdev->dev, "IRQ macirq not found\n");
+               return -ENODEV;
+       }
+
+       res.wol_irq = of_irq_get_byname(np, "eth_wake_irq");
+       if (res.wol_irq < 0) {
+               dev_info(&pdev->dev, "IRQ eth_wake_irq not found, using macirq\n");
+               res.wol_irq = res.irq;
+       }
+
+       res.lpi_irq = of_irq_get_byname(np, "eth_lpi");
+       if (res.lpi_irq < 0) {
+               dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
+               return -ENODEV;
+       }
+
+       return stmmac_dvr_probe(&pdev->dev, plat, &res);
+}
+
+static void loongson_dwmac_remove(struct pci_dev *pdev)
+{
+       int i;
+
+       stmmac_dvr_remove(&pdev->dev);
+
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+               if (pci_resource_len(pdev, i) == 0)
+                       continue;
+               pcim_iounmap_regions(pdev, BIT(i));
+               break;
+       }
+
+       pci_disable_device(pdev);
+}
+
+static int __maybe_unused loongson_dwmac_suspend(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       int ret;
+
+       ret = stmmac_suspend(dev);
+       if (ret)
+               return ret;
+
+       ret = pci_save_state(pdev);
+       if (ret)
+               return ret;
+
+       pci_disable_device(pdev);
+       pci_wake_from_d3(pdev, true);
+       return 0;
+}
+
+static int __maybe_unused loongson_dwmac_resume(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       int ret;
+
+       pci_restore_state(pdev);
+       pci_set_power_state(pdev, PCI_D0);
+
+       ret = pci_enable_device(pdev);
+       if (ret)
+               return ret;
+
+       pci_set_master(pdev);
+
+       return stmmac_resume(dev);
+}
+
+static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend,
+                        loongson_dwmac_resume);
+
+static const struct pci_device_id loongson_dwmac_id_table[] = {
+       { PCI_VDEVICE(LOONGSON, 0x7a03) },
+       {}
+};
+MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
+
+struct pci_driver loongson_dwmac_driver = {
+       .name = "dwmac-loongson-pci",
+       .id_table = loongson_dwmac_id_table,
+       .probe = loongson_dwmac_probe,
+       .remove = loongson_dwmac_remove,
+       .driver = {
+               .pm = &loongson_dwmac_pm_ops,
+       },
+};
+
+module_pci_driver(loongson_dwmac_driver);
+
+MODULE_DESCRIPTION("Loongson DWMAC PCI driver");
+MODULE_AUTHOR("Qing Zhang <zhangqing@loongson.cn>");
+MODULE_LICENSE("GPL v2");
index b70d44a..3c73453 100644 (file)
@@ -76,10 +76,10 @@ enum power_event {
 #define LPI_CTRL_STATUS_TLPIEN 0x00000001      /* Transmit LPI Entry */
 
 /* GMAC HW ADDR regs */
-#define GMAC_ADDR_HIGH(reg)    (((reg > 15) ? 0x00000800 : 0x00000040) + \
-                               (reg * 8))
-#define GMAC_ADDR_LOW(reg)     (((reg > 15) ? 0x00000804 : 0x00000044) + \
-                               (reg * 8))
+#define GMAC_ADDR_HIGH(reg)    ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
+                                0x00000040 + (reg * 8))
+#define GMAC_ADDR_LOW(reg)     ((reg > 15) ? 0x00000804 + (reg - 16) * 8 : \
+                                0x00000044 + (reg * 8))
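+/* Example: GMAC_ADDR_HIGH(16) now yields 0x800 (first extended slot);
+ * the previous formula returned 0x800 + 16 * 8 = 0x880, skipping it.
+ */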
 #define GMAC_MAX_PERFECT_ADDRESSES     1
 
 #define GMAC_PCS_BASE          0x000000c0      /* PCS register base */
index d8ae58b..072eff8 100644 (file)
@@ -627,6 +627,8 @@ error_pclk_get:
 void stmmac_remove_config_dt(struct platform_device *pdev,
                             struct plat_stmmacenet_data *plat)
 {
+       clk_disable_unprepare(plat->stmmac_clk);
+       clk_disable_unprepare(plat->pclk);
        of_node_put(plat->phy_node);
        of_node_put(plat->mdio_node);
 }
index 4e70efc..92dab60 100644 (file)
@@ -573,10 +573,8 @@ static int tc_add_flow(struct stmmac_priv *priv,
 
        for (i = 0; i < ARRAY_SIZE(tc_flow_parsers); i++) {
                ret = tc_flow_parsers[i].fn(priv, cls, entry);
-               if (!ret) {
+               if (!ret)
                        entry->in_use = true;
-                       continue;
-               }
        }
 
        if (!entry->in_use)
index 23cfb91..9c29b36 100644 (file)
@@ -84,7 +84,7 @@ static int am65_cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
        return 0;
 }
 
-static int am65_cpsw_port_attr_set(struct net_device *ndev,
+static int am65_cpsw_port_attr_set(struct net_device *ndev, const void *ctx,
                                   const struct switchdev_attr *attr,
                                   struct netlink_ext_ack *extack)
 {
@@ -302,7 +302,7 @@ static int am65_cpsw_port_mdb_del(struct am65_cpsw_port *port,
        return 0;
 }
 
-static int am65_cpsw_port_obj_add(struct net_device *ndev,
+static int am65_cpsw_port_obj_add(struct net_device *ndev, const void *ctx,
                                  const struct switchdev_obj *obj,
                                  struct netlink_ext_ack *extack)
 {
@@ -329,7 +329,7 @@ static int am65_cpsw_port_obj_add(struct net_device *ndev,
        return err;
 }
 
-static int am65_cpsw_port_obj_del(struct net_device *ndev,
+static int am65_cpsw_port_obj_del(struct net_device *ndev, const void *ctx,
                                  const struct switchdev_obj *obj)
 {
        struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
index 05a64fb..f7fb6e1 100644 (file)
@@ -86,7 +86,7 @@ static int cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
        return 0;
 }
 
-static int cpsw_port_attr_set(struct net_device *ndev,
+static int cpsw_port_attr_set(struct net_device *ndev, const void *ctx,
                              const struct switchdev_attr *attr,
                              struct netlink_ext_ack *extack)
 {
@@ -310,7 +310,7 @@ static int cpsw_port_mdb_del(struct cpsw_priv *priv,
        return err;
 }
 
-static int cpsw_port_obj_add(struct net_device *ndev,
+static int cpsw_port_obj_add(struct net_device *ndev, const void *ctx,
                             const struct switchdev_obj *obj,
                             struct netlink_ext_ack *extack)
 {
@@ -338,7 +338,7 @@ static int cpsw_port_obj_add(struct net_device *ndev,
        return err;
 }
 
-static int cpsw_port_obj_del(struct net_device *ndev,
+static int cpsw_port_obj_del(struct net_device *ndev, const void *ctx,
                             const struct switchdev_obj *obj)
 {
        struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
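
Both TI switchdev glue layers gain the new opaque ctx argument that the switchdev core now passes to port attr/obj handlers. A hedged sketch of a handler that actually consumes it (helper names hypothetical; ctx semantics are defined by whoever dispatches the event):

static int example_port_attr_set(struct net_device *ndev, const void *ctx,
				 const struct switchdev_attr *attr,
				 struct netlink_ext_ack *extack)
{
	/* ctx identifies which stacked device the event targets */
	if (ctx && ctx != example_port_ctx(ndev))
		return 0;	/* not addressed to this port; ignore */

	return example_apply_attr(ndev, attr, extack);
}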
index a1f5f07..60a4f79 100644 (file)
@@ -774,12 +774,15 @@ static void temac_start_xmit_done(struct net_device *ndev)
        stat = be32_to_cpu(cur_p->app0);
 
        while (stat & STS_CTRL_APP0_CMPLT) {
+               /* Make sure that the other fields are read after bd is
+                * released by dma
+                */
+               rmb();
                dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
                                 be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
                skb = (struct sk_buff *)ptr_from_txbd(cur_p);
                if (skb)
                        dev_consume_skb_irq(skb);
-               cur_p->app0 = 0;
                cur_p->app1 = 0;
                cur_p->app2 = 0;
                cur_p->app3 = 0;
@@ -788,6 +791,12 @@ static void temac_start_xmit_done(struct net_device *ndev)
                ndev->stats.tx_packets++;
                ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
 
+               /* app0 must be visible last, as it is used to flag
+                * availability of the bd
+                */
+               smp_mb();
+               cur_p->app0 = 0;
+
                lp->tx_bd_ci++;
                if (lp->tx_bd_ci >= lp->tx_bd_num)
                        lp->tx_bd_ci = 0;
@@ -814,6 +823,9 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
                if (cur_p->app0)
                        return NETDEV_TX_BUSY;
 
+               /* Make sure to read next bd app0 after this one */
+               rmb();
+
                tail++;
                if (tail >= lp->tx_bd_num)
                        tail = 0;
@@ -849,7 +861,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                smp_mb();
 
                /* Space might have just been freed - check again */
-               if (temac_check_tx_bd_space(lp, num_frag))
+               if (temac_check_tx_bd_space(lp, num_frag + 1))
                        return NETDEV_TX_BUSY;
 
                netif_wake_queue(ndev);
@@ -876,7 +888,6 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                return NETDEV_TX_OK;
        }
        cur_p->phys = cpu_to_be32(skb_dma_addr);
-       ptr_to_txbd((void *)skb, cur_p);
 
        for (ii = 0; ii < num_frag; ii++) {
                if (++lp->tx_bd_tail >= lp->tx_bd_num)
@@ -915,6 +926,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        }
        cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
 
+       /* Mark last fragment with skb address, so it can be consumed
+        * in temac_start_xmit_done()
+        */
+       ptr_to_txbd((void *)skb, cur_p);
+
        tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
        lp->tx_bd_tail++;
        if (lp->tx_bd_tail >= lp->tx_bd_num)
@@ -926,6 +942,9 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        wmb();
        lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
 
+       if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+               netif_stop_queue(ndev);
+
        return NETDEV_TX_OK;
 }
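
The temac barriers pair up across the TX ring producer and consumer. A comment-only condensation of the protocol the hunks above implement:

/* Consumer (temac_start_xmit_done):
 *	stat = be32_to_cpu(cur_p->app0);  read ownership flag first
 *	rmb();				  ...then the other BD fields
 *	... unmap buffer, update stats ...
 *	smp_mb();			  all reads complete before...
 *	cur_p->app0 = 0;		  ...the BD is released
 *
 * Producer (temac_start_xmit):
 *	... fill BD fields, set STS_CTRL_APP0_EOP ...
 *	wmb();				  BD visible before the doorbell
 *	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p);
 */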
 
index 1c9023d..30e0a10 100644 (file)
@@ -201,6 +201,7 @@ static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
         * calculate the transport header.
         */
        skb_reset_network_header(skb);
+       skb_reset_mac_header(skb);
 
        skb->dev = pctx->dev;
 
index 9933c87..b991286 100644 (file)
@@ -799,6 +799,7 @@ static void mkiss_close(struct tty_struct *tty)
        ax->tty = NULL;
 
        unregister_netdev(ax->dev);
+       free_netdev(ax->dev);
 }
 
 /* Perform I/O control on an active ax25 channel. */
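
The mkiss fix adds the explicit free because this netdev is not flagged for automatic release. The two standard ownership patterns, sketched:

/* Pattern 1: explicit pairing, as mkiss now does */
unregister_netdev(ax->dev);
free_netdev(ax->dev);

/* Pattern 2: set before register_netdev(), and the core frees the
 * device itself once the last reference drops.
 */
dev->needs_free_netdev = true;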
index 442c520..9e5eee4 100644 (file)
@@ -1163,6 +1163,7 @@ struct rndis_set_request {
        u32 info_buflen;
        u32 info_buf_offset;
        u32 dev_vc_handle;
+       u8  info_buf[];
 };
 
 /* Response to NdisSetRequest */
index c0e89e1..033ed6e 100644 (file)
@@ -1051,10 +1051,8 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev,
        set = &request->request_msg.msg.set_req;
        set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
        set->info_buflen = sizeof(u32);
-       set->info_buf_offset = sizeof(struct rndis_set_request);
-
-       memcpy((void *)(unsigned long)set + sizeof(struct rndis_set_request),
-              &new_filter, sizeof(u32));
+       set->info_buf_offset = offsetof(typeof(*set), info_buf);
+       memcpy(set->info_buf, &new_filter, sizeof(u32));
 
        ret = rndis_filter_send_request(dev, request);
        if (ret == 0) {
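
The rndis change swaps open-coded pointer arithmetic for a flexible array member, letting the compiler see the real destination object. A self-contained sketch of the idiom (struct and function names hypothetical):

#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_set_request {
	u32 oid;
	u32 info_buflen;
	u32 info_buf_offset;
	u32 dev_vc_handle;
	u8  info_buf[];		/* flexible array member: zero size cost */
};

static void example_fill(struct example_set_request *set, u32 filter)
{
	/* offsetof(typeof(*set), info_buf) equals sizeof(*set) here,
	 * but now a fortified memcpy() can bounds-check info_buf.
	 */
	set->info_buflen = sizeof(filter);
	set->info_buf_offset = offsetof(typeof(*set), info_buf);
	memcpy(set->info_buf, &filter, sizeof(filter));
}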
index bd34fce..506f8d5 100644 (file)
@@ -10,6 +10,6 @@ ipa-y                 :=      ipa_main.o ipa_clock.o ipa_reg.o ipa_mem.o \
                                ipa_resource.o ipa_qmi.o ipa_qmi_msg.o \
                                ipa_sysfs.o
 
-ipa-y                  +=      ipa_data-v3.5.1.o ipa_data-v4.2.o \
-                               ipa_data-v4.5.o ipa_data-v4.9.o \
-                               ipa_data-v4.11.o
+ipa-y                  +=      ipa_data-v3.1.o ipa_data-v3.5.1.o \
+                               ipa_data-v4.2.o ipa_data-v4.5.o \
+                               ipa_data-v4.9.o ipa_data-v4.11.o
index e374079..427c68b 100644 (file)
@@ -210,13 +210,65 @@ static void gsi_irq_setup(struct gsi *gsi)
        iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
 
-       /* The inter-EE registers are in the non-adjusted address range */
-       iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET);
-       iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET);
+       /* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */
+       if (gsi->version > IPA_VERSION_3_1) {
+               u32 offset;
+
+               /* These registers are in the non-adjusted address range */
+               offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET;
+               iowrite32(0, gsi->virt_raw + offset);
+               offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET;
+               iowrite32(0, gsi->virt_raw + offset);
+       }
 
        iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
 }
 
+/* Get # supported channel and event rings; there is no gsi_ring_teardown() */
+static int gsi_ring_setup(struct gsi *gsi)
+{
+       struct device *dev = gsi->dev;
+       u32 count;
+       u32 val;
+
+       if (gsi->version < IPA_VERSION_3_5_1) {
+               /* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */
+               gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
+               gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
+
+               return 0;
+       }
+
+       val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
+
+       count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
+       if (!count) {
+               dev_err(dev, "GSI reports zero channels supported\n");
+               return -EINVAL;
+       }
+       if (count > GSI_CHANNEL_COUNT_MAX) {
+               dev_warn(dev, "limiting to %u channels; hardware supports %u\n",
+                        GSI_CHANNEL_COUNT_MAX, count);
+               count = GSI_CHANNEL_COUNT_MAX;
+       }
+       gsi->channel_count = count;
+
+       count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
+       if (!count) {
+               dev_err(dev, "GSI reports zero event rings supported\n");
+               return -EINVAL;
+       }
+       if (count > GSI_EVT_RING_COUNT_MAX) {
+               dev_warn(dev,
+                        "limiting to %u event rings; hardware supports %u\n",
+                        GSI_EVT_RING_COUNT_MAX, count);
+               count = GSI_EVT_RING_COUNT_MAX;
+       }
+       gsi->evt_ring_count = count;
+
+       return 0;
+}
+
 /* Event ring commands are performed one at a time.  Their completion
  * is signaled by the event ring control GSI interrupt type, which is
  * only enabled when we issue an event ring command.  Only the event
@@ -1827,43 +1879,21 @@ static void gsi_channel_teardown(struct gsi *gsi)
 /* Setup function for GSI.  GSI firmware must be loaded and initialized */
 int gsi_setup(struct gsi *gsi)
 {
-       struct device *dev = gsi->dev;
        u32 val;
+       int ret;
 
        /* Here is where we first touch the GSI hardware */
        val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
        if (!(val & ENABLED_FMASK)) {
-               dev_err(dev, "GSI has not been enabled\n");
+               dev_err(gsi->dev, "GSI has not been enabled\n");
                return -EIO;
        }
 
        gsi_irq_setup(gsi);             /* No matching teardown required */
 
-       val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
-
-       gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
-       if (!gsi->channel_count) {
-               dev_err(dev, "GSI reports zero channels supported\n");
-               return -EINVAL;
-       }
-       if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
-               dev_warn(dev,
-                        "limiting to %u channels; hardware supports %u\n",
-                        GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
-               gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
-       }
-
-       gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
-       if (!gsi->evt_ring_count) {
-               dev_err(dev, "GSI reports zero event rings supported\n");
-               return -EINVAL;
-       }
-       if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
-               dev_warn(dev,
-                        "limiting to %u event rings; hardware supports %u\n",
-                        GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
-               gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
-       }
+       ret = gsi_ring_setup(gsi);      /* No matching teardown required */
+       if (ret)
+               return ret;
 
        /* Initialize the error log */
        iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
index d5996bd..81cd7b0 100644 (file)
@@ -17,7 +17,7 @@
 
 /* Maximum number of channels and event rings supported by the driver */
 #define GSI_CHANNEL_COUNT_MAX  23
-#define GSI_EVT_RING_COUNT_MAX 20
+#define GSI_EVT_RING_COUNT_MAX 24
 
 /* Maximum TLV FIFO size for a channel; 64 here is arbitrary (and high) */
 #define GSI_TLV_MAX            64
index cb42c5a..bf9593d 100644 (file)
@@ -52,7 +52,8 @@
  */
 #define GSI_EE_REG_ADJUST                      0x0000d000      /* IPA v4.5+ */
 
-/* The two inter-EE IRQ register offsets are relative to gsi->virt_raw */
+/* The inter-EE IRQ registers are relative to gsi->virt_raw (IPA v3.5+) */
+
 #define GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET \
                        GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
 #define GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(ee) \
diff --git a/drivers/net/ipa/ipa_data-v3.1.c b/drivers/net/ipa/ipa_data-v3.1.c
new file mode 100644 (file)
index 0000000..4c28189
--- /dev/null
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2021 Linaro Ltd.
+ */
+
+#include <linux/log2.h>
+
+#include "gsi.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_mem.h"
+
+/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.1 */
+enum ipa_resource_type {
+       /* Source resource types; first must have value 0 */
+       IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS              = 0,
+       IPA_RESOURCE_TYPE_SRC_HDR_SECTORS,
+       IPA_RESOURCE_TYPE_SRC_HDRI1_BUFFER,
+       IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+       IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+       IPA_RESOURCE_TYPE_SRC_HDRI2_BUFFERS,
+       IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+       IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+
+       /* Destination resource types; first must have value 0 */
+       IPA_RESOURCE_TYPE_DST_DATA_SECTORS              = 0,
+       IPA_RESOURCE_TYPE_DST_DATA_SECTOR_LISTS,
+       IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+};
+
+/* Resource groups used for an SoC having IPA v3.1 */
+enum ipa_rsrc_group_id {
+       /* Source resource group identifiers */
+       IPA_RSRC_GROUP_SRC_UL           = 0,
+       IPA_RSRC_GROUP_SRC_DL,
+       IPA_RSRC_GROUP_SRC_DIAG,
+       IPA_RSRC_GROUP_SRC_DMA,
+       IPA_RSRC_GROUP_SRC_UNUSED,
+       IPA_RSRC_GROUP_SRC_UC_RX_Q,
+       IPA_RSRC_GROUP_SRC_COUNT,       /* Last in set; not a source group */
+
+       /* Destination resource group identifiers */
+       IPA_RSRC_GROUP_DST_UL           = 0,
+       IPA_RSRC_GROUP_DST_DL,
+       IPA_RSRC_GROUP_DST_DIAG_DPL,
+       IPA_RSRC_GROUP_DST_DMA,
+       IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL,
+       IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE,
+       IPA_RSRC_GROUP_DST_COUNT,       /* Last; not a destination group */
+};
+
+/* QSB configuration data for an SoC having IPA v3.1 */
+static const struct ipa_qsb_data ipa_qsb_data[] = {
+       [IPA_QSB_MASTER_DDR] = {
+               .max_writes     = 8,
+               .max_reads      = 8,
+       },
+       [IPA_QSB_MASTER_PCIE] = {
+               .max_writes     = 2,
+               .max_reads      = 8,
+       },
+};
+
+/* Endpoint data for an SoC having IPA v3.1 */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+       [IPA_ENDPOINT_AP_COMMAND_TX] = {
+               .ee_id          = GSI_EE_AP,
+               .channel_id     = 6,
+               .endpoint_id    = 22,
+               .toward_ipa     = true,
+               .channel = {
+                       .tre_count      = 256,
+                       .event_count    = 256,
+                       .tlv_count      = 18,
+               },
+               .endpoint = {
+                       .config = {
+                               .resource_group = IPA_RSRC_GROUP_SRC_UL,
+                               .dma_mode       = true,
+                               .dma_endpoint   = IPA_ENDPOINT_AP_LAN_RX,
+                               .tx = {
+                                       .seq_type = IPA_SEQ_DMA,
+                               },
+                       },
+               },
+       },
+       [IPA_ENDPOINT_AP_LAN_RX] = {
+               .ee_id          = GSI_EE_AP,
+               .channel_id     = 7,
+               .endpoint_id    = 15,
+               .toward_ipa     = false,
+               .channel = {
+                       .tre_count      = 256,
+                       .event_count    = 256,
+                       .tlv_count      = 8,
+               },
+               .endpoint = {
+                       .config = {
+                               .resource_group = IPA_RSRC_GROUP_SRC_UL,
+                               .aggregation    = true,
+                               .status_enable  = true,
+                               .rx = {
+                                       .pad_align      = ilog2(sizeof(u32)),
+                               },
+                       },
+               },
+       },
+       [IPA_ENDPOINT_AP_MODEM_TX] = {
+               .ee_id          = GSI_EE_AP,
+               .channel_id     = 5,
+               .endpoint_id    = 3,
+               .toward_ipa     = true,
+               .channel = {
+                       .tre_count      = 512,
+                       .event_count    = 512,
+                       .tlv_count      = 16,
+               },
+               .endpoint = {
+                       .filter_support = true,
+                       .config = {
+                               .resource_group = IPA_RSRC_GROUP_SRC_UL,
+                               .checksum       = true,
+                               .qmap           = true,
+                               .status_enable  = true,
+                               .tx = {
+                                       .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
+                                       .status_endpoint =
+                                               IPA_ENDPOINT_MODEM_AP_RX,
+                               },
+                       },
+               },
+       },
+       [IPA_ENDPOINT_AP_MODEM_RX] = {
+               .ee_id          = GSI_EE_AP,
+               .channel_id     = 8,
+               .endpoint_id    = 16,
+               .toward_ipa     = false,
+               .channel = {
+                       .tre_count      = 256,
+                       .event_count    = 256,
+                       .tlv_count      = 8,
+               },
+               .endpoint = {
+                       .config = {
+                               .resource_group = IPA_RSRC_GROUP_DST_DL,
+                               .checksum       = true,
+                               .qmap           = true,
+                               .aggregation    = true,
+                               .rx = {
+                                       .aggr_close_eof = true,
+                               },
+                       },
+               },
+       },
+       [IPA_ENDPOINT_MODEM_LAN_TX] = {
+               .ee_id          = GSI_EE_MODEM,
+               .channel_id     = 4,
+               .endpoint_id    = 9,
+               .toward_ipa     = true,
+               .endpoint = {
+                       .filter_support = true,
+               },
+       },
+       [IPA_ENDPOINT_MODEM_AP_TX] = {
+               .ee_id          = GSI_EE_MODEM,
+               .channel_id     = 0,
+               .endpoint_id    = 5,
+               .toward_ipa     = true,
+               .endpoint = {
+                       .filter_support = true,
+               },
+       },
+       [IPA_ENDPOINT_MODEM_AP_RX] = {
+               .ee_id          = GSI_EE_MODEM,
+               .channel_id     = 5,
+               .endpoint_id    = 18,
+               .toward_ipa     = false,
+       },
+};
+
+/* Source resource configuration data for an SoC having IPA v3.1 */
+static const struct ipa_resource ipa_resource_src[] = {
+       [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
+               .limits[IPA_RSRC_GROUP_SRC_UL] = {
+                       .min = 3,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DL] = {
+                       .min = 3,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+                       .min = 1,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+                       .min = 1,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+                       .min = 2,       .max = 255,
+               },
+       },
+       [IPA_RESOURCE_TYPE_SRC_HDR_SECTORS] = {
+               .limits[IPA_RSRC_GROUP_SRC_UL] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DL] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+                       .min = 0,       .max = 255,
+               },
+       },
+       [IPA_RESOURCE_TYPE_SRC_HDRI1_BUFFER] = {
+               .limits[IPA_RSRC_GROUP_SRC_UL] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DL] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+                       .min = 0,       .max = 255,
+               },
+       },
+       [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
+               .limits[IPA_RSRC_GROUP_SRC_UL] = {
+                       .min = 14,      .max = 14,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DL] = {
+                       .min = 16,      .max = 16,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+                       .min = 5,       .max = 5,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+                       .min = 5,       .max = 5,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+                       .min = 8,       .max = 8,
+               },
+       },
+       [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
+               .limits[IPA_RSRC_GROUP_SRC_UL] = {
+                       .min = 19,      .max = 19,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DL] = {
+                       .min = 26,      .max = 26,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+                       .min = 5,       .max = 5,       /* 3 downstream */
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+                       .min = 5,       .max = 5,       /* 7 downstream */
+               },
+               .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+                       .min = 8,       .max = 8,
+               },
+       },
+       [IPA_RESOURCE_TYPE_SRC_HDRI2_BUFFERS] = {
+               .limits[IPA_RSRC_GROUP_SRC_UL] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DL] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+                       .min = 0,       .max = 255,
+               },
+       },
+       [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
+               .limits[IPA_RSRC_GROUP_SRC_UL] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DL] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+                       .min = 0,       .max = 255,
+               },
+       },
+       [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
+               .limits[IPA_RSRC_GROUP_SRC_UL] = {
+                       .min = 19,      .max = 19,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DL] = {
+                       .min = 26,      .max = 26,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+                       .min = 5,       .max = 5,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+                       .min = 5,       .max = 5,
+               },
+               .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+                       .min = 8,       .max = 8,
+               },
+       },
+};
+
+/* Destination resource configuration data for an SoC having IPA v3.1 */
+static const struct ipa_resource ipa_resource_dst[] = {
+       [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
+               .limits[IPA_RSRC_GROUP_DST_UL] = {
+                       .min = 3,       .max = 3,       /* 2 downstream */
+               },
+               .limits[IPA_RSRC_GROUP_DST_DL] = {
+                       .min = 3,       .max = 3,
+               },
+               .limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = {
+                       .min = 1,       .max = 1,       /* 0 downstream */
+               },
+               /* IPA_RSRC_GROUP_DST_DMA uses 2 downstream */
+               .limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = {
+                       .min = 3,       .max = 3,
+               },
+               .limits[IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE] = {
+                       .min = 3,       .max = 3,
+               },
+       },
+       [IPA_RESOURCE_TYPE_DST_DATA_SECTOR_LISTS] = {
+               .limits[IPA_RSRC_GROUP_DST_UL] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_DST_DL] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_DST_DMA] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = {
+                       .min = 0,       .max = 255,
+               },
+               .limits[IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE] = {
+                       .min = 0,       .max = 255,
+               },
+       },
+       [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
+               .limits[IPA_RSRC_GROUP_DST_UL] = {
+                       .min = 1,       .max = 1,
+               },
+               .limits[IPA_RSRC_GROUP_DST_DL] = {
+                       .min = 1,       .max = 1,
+               },
+               .limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = {
+                       .min = 1,       .max = 1,
+               },
+               .limits[IPA_RSRC_GROUP_DST_DMA] = {
+                       .min = 1,       .max = 1,
+               },
+               .limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = {
+                       .min = 1,       .max = 1,
+               },
+       },
+};
+
+/* Resource configuration data for an SoC having IPA v3.1 */
+static const struct ipa_resource_data ipa_resource_data = {
+       .rsrc_group_src_count   = IPA_RSRC_GROUP_SRC_COUNT,
+       .rsrc_group_dst_count   = IPA_RSRC_GROUP_DST_COUNT,
+       .resource_src_count     = ARRAY_SIZE(ipa_resource_src),
+       .resource_src           = ipa_resource_src,
+       .resource_dst_count     = ARRAY_SIZE(ipa_resource_dst),
+       .resource_dst           = ipa_resource_dst,
+};
+
+/* IPA-resident memory region data for an SoC having IPA v3.1 */
+static const struct ipa_mem ipa_mem_local_data[] = {
+       {
+               .id             = IPA_MEM_UC_SHARED,
+               .offset         = 0x0000,
+               .size           = 0x0080,
+               .canary_count   = 0,
+       },
+       {
+               .id             = IPA_MEM_UC_INFO,
+               .offset         = 0x0080,
+               .size           = 0x0200,
+               .canary_count   = 0,
+       },
+       {
+               .id             = IPA_MEM_V4_FILTER_HASHED,
+               .offset         = 0x0288,
+               .size           = 0x0078,
+               .canary_count   = 2,
+       },
+       {
+               .id             = IPA_MEM_V4_FILTER,
+               .offset         = 0x0308,
+               .size           = 0x0078,
+               .canary_count   = 2,
+       },
+       {
+               .id             = IPA_MEM_V6_FILTER_HASHED,
+               .offset         = 0x0388,
+               .size           = 0x0078,
+               .canary_count   = 2,
+       },
+       {
+               .id             = IPA_MEM_V6_FILTER,
+               .offset         = 0x0408,
+               .size           = 0x0078,
+               .canary_count   = 2,
+       },
+       {
+               .id             = IPA_MEM_V4_ROUTE_HASHED,
+               .offset         = 0x0488,
+               .size           = 0x0078,
+               .canary_count   = 2,
+       },
+       {
+               .id             = IPA_MEM_V4_ROUTE,
+               .offset         = 0x0508,
+               .size           = 0x0078,
+               .canary_count   = 2,
+       },
+       {
+               .id             = IPA_MEM_V6_ROUTE_HASHED,
+               .offset         = 0x0588,
+               .size           = 0x0078,
+               .canary_count   = 2,
+       },
+       {
+               .id             = IPA_MEM_V6_ROUTE,
+               .offset         = 0x0608,
+               .size           = 0x0078,
+               .canary_count   = 2,
+       },
+       {
+               .id             = IPA_MEM_MODEM_HEADER,
+               .offset         = 0x0688,
+               .size           = 0x0140,
+               .canary_count   = 2,
+       },
+       {
+               .id             = IPA_MEM_MODEM_PROC_CTX,
+               .offset         = 0x07d0,
+               .size           = 0x0200,
+               .canary_count   = 2,
+       },
+       {
+               .id             = IPA_MEM_AP_PROC_CTX,
+               .offset         = 0x09d0,
+               .size           = 0x0200,
+               .canary_count   = 0,
+       },
+       {
+               .id             = IPA_MEM_MODEM,
+               .offset         = 0x0bd8,
+               .size           = 0x1424,
+               .canary_count   = 0,
+       },
+       {
+               .id             = IPA_MEM_END_MARKER,
+               .offset         = 0x2000,
+               .size           = 0,
+               .canary_count   = 1,
+       },
+};
+
+/* Memory configuration data for an SoC having IPA v3.1 */
+static const struct ipa_mem_data ipa_mem_data = {
+       .local_count    = ARRAY_SIZE(ipa_mem_local_data),
+       .local          = ipa_mem_local_data,
+       .imem_addr      = 0x146bd000,
+       .imem_size      = 0x00002000,
+       .smem_id        = 497,
+       .smem_size      = 0x00002000,
+};
+
+/* Interconnect bandwidths are in 1000 byte/second units */
+static const struct ipa_interconnect_data ipa_interconnect_data[] = {
+       {
+               .name                   = "memory",
+               .peak_bandwidth         = 640000,       /* 640 MBps */
+               .average_bandwidth      = 80000,        /* 80 MBps */
+       },
+       {
+               .name                   = "imem",
+               .peak_bandwidth         = 640000,       /* 640 MBps */
+               .average_bandwidth      = 80000,        /* 80 MBps */
+       },
+       /* Average bandwidth is unused for the next interconnect */
+       {
+               .name                   = "config",
+               .peak_bandwidth         = 80000,        /* 80 MBps */
+               .average_bandwidth      = 0,            /* unused */
+       },
+};
+
+/* Clock and interconnect configuration data for an SoC having IPA v3.1 */
+static const struct ipa_clock_data ipa_clock_data = {
+       .core_clock_rate        = 16 * 1000 * 1000,     /* Hz */
+       .interconnect_count     = ARRAY_SIZE(ipa_interconnect_data),
+       .interconnect_data      = ipa_interconnect_data,
+};
+
+/* Configuration data for an SoC having IPA v3.1 */
+const struct ipa_data ipa_data_v3_1 = {
+       .version        = IPA_VERSION_3_1,
+       .backward_compat = BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK,
+       .qsb_count      = ARRAY_SIZE(ipa_qsb_data),
+       .qsb_data       = ipa_qsb_data,
+       .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+       .endpoint_data  = ipa_gsi_endpoint_data,
+       .resource_data  = &ipa_resource_data,
+       .mem_data       = &ipa_mem_data,
+       .clock_data     = &ipa_clock_data,
+};
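
This new table reaches the driver through the of_device_id entry added in ipa_main.c further below. A hedged sketch of the usual retrieval pattern at probe time (assuming the driver follows the standard match-data idiom):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	const struct ipa_data *data;

	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -ENODEV;

	/* ... data->endpoint_data, data->mem_data, data->clock_data
	 * now describe the matched SoC's IPA instance ...
	 */
	return 0;
}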
index 5c4c8d7..5bc244c 100644 (file)
@@ -300,6 +300,7 @@ struct ipa_data {
        const struct ipa_clock_data *clock_data;
 };
 
+extern const struct ipa_data ipa_data_v3_1;
 extern const struct ipa_data ipa_data_v3_5_1;
 extern const struct ipa_data ipa_data_v4_2;
 extern const struct ipa_data ipa_data_v4_5;
index 3520852..ab02669 100644 (file)
@@ -1731,6 +1731,21 @@ int ipa_endpoint_config(struct ipa *ipa)
        u32 max;
        u32 val;
 
+       /* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
+        * Furthermore, the endpoints were not grouped such that TX
+        * endpoint numbers started with 0 and RX endpoints had numbers
+        * higher than all TX endpoints, so we can't do the simple
+        * direction check used for newer hardware below.
+        *
+        * For hardware that doesn't support the FLAVOR_0 register,
+        * just set the available mask to support any endpoint, and
+        * assume the configuration is valid.
+        */
+       if (ipa->version < IPA_VERSION_3_5) {
+               ipa->available = ~0;
+               return 0;
+       }
+
        /* Find out about the endpoints supplied by the hardware, and ensure
         * the highest one doesn't exceed the number we support.
         */
index f82130d..9810c61 100644 (file)
@@ -400,16 +400,20 @@ static void ipa_hardware_config(struct ipa *ipa, const struct ipa_data *data)
 
        /* Implement some hardware workarounds */
        if (version >= IPA_VERSION_4_0 && version < IPA_VERSION_4_5) {
-               /* Enable open global clocks (not needed for IPA v4.5) */
-               val = GLOBAL_FMASK;
-               val |= GLOBAL_2X_CLK_FMASK;
-               iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET);
-
                /* Disable PA mask to allow HOLB drop */
                val = ioread32(ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
                val &= ~PA_MASK_EN_FMASK;
                iowrite32(val, ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
+
+               /* Enable open global clocks in the CLKON configuration */
+               val = GLOBAL_FMASK | GLOBAL_2X_CLK_FMASK;
+       } else if (version == IPA_VERSION_3_1) {
+               val = MISC_FMASK;       /* Disable MISC clock gating */
+       } else {
+               val = 0;                /* No CLKON configuration needed */
        }
+       if (val)
+               iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET);
 
        ipa_hardware_config_comp(ipa);
 
@@ -575,6 +579,10 @@ out_release_firmware:
 
 static const struct of_device_id ipa_match[] = {
        {
+               .compatible     = "qcom,msm8998-ipa",
+               .data           = &ipa_data_v3_1,
+       },
+       {
                .compatible     = "qcom,sdm845-ipa",
                .data           = &ipa_data_v3_5_1,
        },
index 832d9de..e60e38c 100644 (file)
@@ -32,7 +32,7 @@ struct mhi_device_info {
 
 static int mhi_ndo_open(struct net_device *ndev)
 {
-       struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+       struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
 
        /* Feed the rx buffer pool */
        schedule_delayed_work(&mhi_netdev->rx_refill, 0);
@@ -47,7 +47,7 @@ static int mhi_ndo_open(struct net_device *ndev)
 
 static int mhi_ndo_stop(struct net_device *ndev)
 {
-       struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+       struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
 
        netif_stop_queue(ndev);
        netif_carrier_off(ndev);
@@ -56,9 +56,9 @@ static int mhi_ndo_stop(struct net_device *ndev)
        return 0;
 }
 
-static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
-       struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+       struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
        const struct mhi_net_proto *proto = mhi_netdev->proto;
        struct mhi_device *mdev = mhi_netdev->mdev;
        int err;
@@ -93,7 +93,7 @@ exit_drop:
 static void mhi_ndo_get_stats64(struct net_device *ndev,
                                struct rtnl_link_stats64 *stats)
 {
-       struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+       struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
        unsigned int start;
 
        do {
@@ -322,7 +322,7 @@ static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
        if (dev_get_drvdata(&mhi_dev->dev))
                return -EBUSY;
 
-       mhi_netdev = netdev_priv(ndev);
+       mhi_netdev = wwan_netdev_drvpriv(ndev);
 
        dev_set_drvdata(&mhi_dev->dev, mhi_netdev);
        mhi_netdev->ndev = ndev;
@@ -367,7 +367,7 @@ out_err:
 static void mhi_net_dellink(void *ctxt, struct net_device *ndev,
                            struct list_head *head)
 {
-       struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+       struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
        struct mhi_device *mhi_dev = ctxt;
 
        if (head)
@@ -383,7 +383,6 @@ static void mhi_net_dellink(void *ctxt, struct net_device *ndev,
 }
 
 static const struct wwan_ops mhi_wwan_ops = {
-       .owner = THIS_MODULE,
        .priv_size = sizeof(struct mhi_net_dev),
        .setup = mhi_net_setup,
        .newlink = mhi_net_newlink,
@@ -398,7 +397,8 @@ static int mhi_net_probe(struct mhi_device *mhi_dev,
        struct net_device *ndev;
        int err;
 
-       err = wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_wwan_ops, mhi_dev);
+       err = wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_wwan_ops, mhi_dev,
+                               WWAN_NO_DEFAULT_LINK);
        if (err)
                return err;
 
@@ -436,7 +436,7 @@ static void mhi_net_remove(struct mhi_device *mhi_dev)
        struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
        struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
 
-       /* rtnetlink takes care of removing remaining links */
+       /* WWAN core takes care of removing remaining links */
        wwan_unregister_ops(&cntrl->mhi_dev->dev);
 
        if (create_default_iface)
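
The netdev_priv() to wwan_netdev_drvpriv() conversion follows from the WWAN core now keeping its own per-link state at the head of the netdev private area; drivers must go through the accessor to find their portion. Sketched:

static void example_rx_refill(struct net_device *ndev)
{
	/* Not netdev_priv(ndev): that would alias the WWAN core's
	 * per-link bookkeeping rather than the driver's own state.
	 */
	struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);

	schedule_delayed_work(&mhi_netdev->rx_refill, 0);
}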
index fc72b3f..bf1ad86 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/ip.h>
 #include <linux/mii.h>
 #include <linux/netdevice.h>
+#include <linux/wwan.h>
 #include <linux/skbuff.h>
 #include <linux/usb.h>
 #include <linux/usb/cdc.h>
@@ -56,7 +57,7 @@ static void __mbim_errors_inc(struct mhi_net_dev *dev)
 
 static int mbim_rx_verify_nth16(struct sk_buff *skb)
 {
-       struct mhi_net_dev *dev = netdev_priv(skb->dev);
+       struct mhi_net_dev *dev = wwan_netdev_drvpriv(skb->dev);
        struct mbim_context *ctx = dev->proto_data;
        struct usb_cdc_ncm_nth16 *nth16;
        int len;
@@ -102,7 +103,7 @@ static int mbim_rx_verify_nth16(struct sk_buff *skb)
 
 static int mbim_rx_verify_ndp16(struct sk_buff *skb, struct usb_cdc_ncm_ndp16 *ndp16)
 {
-       struct mhi_net_dev *dev = netdev_priv(skb->dev);
+       struct mhi_net_dev *dev = wwan_netdev_drvpriv(skb->dev);
        int ret;
 
        if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
index d855219..6348307 100644 (file)
@@ -269,9 +269,9 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
                err = PTR_ERR(nsim_dev->nodes_ddir);
                goto err_out;
        }
-       debugfs_create_bool("fail_trap_counter_get", 0600,
+       debugfs_create_bool("fail_trap_drop_counter_get", 0600,
                            nsim_dev->ddir,
-                           &nsim_dev->fail_trap_counter_get);
+                           &nsim_dev->fail_trap_drop_counter_get);
        nsim_udp_tunnels_debugfs_create(nsim_dev);
        return 0;
 
@@ -1208,14 +1208,14 @@ static int nsim_rate_node_parent_set(struct devlink_rate *child,
 }
 
 static int
-nsim_dev_devlink_trap_hw_counter_get(struct devlink *devlink,
-                                    const struct devlink_trap *trap,
-                                    u64 *p_drops)
+nsim_dev_devlink_trap_drop_counter_get(struct devlink *devlink,
+                                      const struct devlink_trap *trap,
+                                      u64 *p_drops)
 {
        struct nsim_dev *nsim_dev = devlink_priv(devlink);
        u64 *cnt;
 
-       if (nsim_dev->fail_trap_counter_get)
+       if (nsim_dev->fail_trap_drop_counter_get)
                return -EINVAL;
 
        cnt = &nsim_dev->trap_data->trap_pkt_cnt;
@@ -1247,7 +1247,7 @@ static const struct devlink_ops nsim_dev_devlink_ops = {
        .rate_node_del = nsim_rate_node_del,
        .rate_leaf_parent_set = nsim_rate_leaf_parent_set,
        .rate_node_parent_set = nsim_rate_node_parent_set,
-       .trap_drop_counter_get = nsim_dev_devlink_trap_hw_counter_get,
+       .trap_drop_counter_get = nsim_dev_devlink_trap_drop_counter_get,
 };
 
 #define NSIM_DEV_MAX_MACS_DEFAULT 32
index f2304e6..ae46295 100644 (file)
@@ -249,7 +249,7 @@ struct nsim_dev {
        bool fail_trap_group_set;
        bool fail_trap_policer_set;
        bool fail_trap_policer_counter_get;
-       bool fail_trap_counter_get;
+       bool fail_trap_drop_counter_get;
        struct {
                struct udp_tunnel_nic_shared utn_shared;
                u32 __ports[2][NSIM_UDP_TUNNEL_N_PORTS];
index 9bd9a5c..6bbc81a 100644 (file)
@@ -826,16 +826,12 @@ static int dp83867_phy_reset(struct phy_device *phydev)
 {
        int err;
 
-       err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET);
+       err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART);
        if (err < 0)
                return err;
 
        usleep_range(10, 20);
 
-       /* After reset FORCE_LINK_GOOD bit is set. Although the
-        * default value should be unset. Disable FORCE_LINK_GOOD
-        * for the phy to work properly.
-        */
        return phy_modify(phydev, MII_DP83867_PHYCTRL,
                         DP83867_PHYCR_FORCE_LINK_GOOD, 0);
 }
index 2e60bc1..359ea0d 100644 (file)
@@ -123,10 +123,10 @@ static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
        }
 
        skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags);
+       dev_kfree_skb_any(skb);
        if (!skb2)
                return NULL;
 
-       dev_kfree_skb_any(skb);
        skb = skb2;
 
 done:
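
The cdc_eem reordering fixes a leak: when skb_copy_expand() failed, the function returned NULL with the original skb still allocated, and the caller had nothing left to free. The corrected shape, standalone:

/* Free the original unconditionally; after a failed copy there is
 * nothing to hand back, and NULL tells usbnet to count a drop.
 */
skb2 = skb_copy_expand(skb, headroom, tailroom, flags);
dev_kfree_skb_any(skb);
if (!skb2)
	return NULL;
skb = skb2;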
index c67f11e..24753a4 100644 (file)
@@ -1892,7 +1892,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
 static const struct driver_info cdc_ncm_info = {
        .description = "CDC NCM",
        .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
-                       | FLAG_LINK_INTR,
+                       | FLAG_LINK_INTR | FLAG_ETHER,
        .bind = cdc_ncm_bind,
        .unbind = cdc_ncm_unbind,
        .manage_power = usbnet_manage_power,
index db157f2..6a2e4f8 100644 (file)
@@ -575,7 +575,7 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 
        if (info->flags & QMI_WWAN_FLAG_PASS_THROUGH) {
                skb->protocol = htons(ETH_P_MAP);
-               return (netif_rx(skb) == NET_RX_SUCCESS);
+               return 1;
        }
 
        switch (skb->data[0] & 0xf0) {
index 62cd48d..1692d3b 100644 (file)
@@ -8680,7 +8680,7 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
        switch (stringset) {
        case ETH_SS_STATS:
-               memcpy(data, *rtl8152_gstrings, sizeof(rtl8152_gstrings));
+               memcpy(data, rtl8152_gstrings, sizeof(rtl8152_gstrings));
                break;
        }
 }
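
Both expressions name the same address, since rtl8152_gstrings is a two-dimensional char array and *rtl8152_gstrings is its first row; the difference is the object size a fortified memcpy() infers. A minimal reproduction:

static void copy_strings(u8 *data)
{
	static const char strings[3][32] = { "a", "b", "c" };

	/* sizeof(strings) == 96 (whole table); sizeof(*strings) == 32
	 * (row 0 only). Passing *strings tells FORTIFY_SOURCE the
	 * source is 32 bytes and flags the 96-byte read.
	 */
	memcpy(data, strings, sizeof(strings));
}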
index b286993..13141db 100644 (file)
@@ -1483,7 +1483,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
        ret = smsc75xx_wait_ready(dev, 0);
        if (ret < 0) {
                netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
-               goto err;
+               goto free_pdata;
        }
 
        smsc75xx_init_mac_address(dev);
@@ -1492,7 +1492,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
        ret = smsc75xx_reset(dev);
        if (ret < 0) {
                netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
-               goto err;
+               goto cancel_work;
        }
 
        dev->net->netdev_ops = &smsc75xx_netdev_ops;
@@ -1503,8 +1503,11 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE;
        return 0;
 
-err:
+cancel_work:
+       cancel_work_sync(&pdata->set_multicast);
+free_pdata:
        kfree(pdata);
+       dev->data[0] = 0;
        return ret;
 }
 
@@ -1515,7 +1518,6 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
                cancel_work_sync(&pdata->set_multicast);
                netif_dbg(dev, ifdown, dev->net, "free pdata\n");
                kfree(pdata);
-               pdata = NULL;
                dev->data[0] = 0;
        }
 }
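
The relabelled smsc75xx error path is the standard goto ladder: each label undoes exactly what succeeded before the failure, so the reset step that may have scheduled the multicast work gets cancel_work_sync() while the earlier failure does not. In skeleton form (names hypothetical):

static int example_bind(struct usbnet *dev)
{
	struct example_priv *pdata;
	int ret;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;
	dev->data[0] = (unsigned long)pdata;

	INIT_WORK(&pdata->set_multicast, example_deferred_multicast);

	ret = example_wait_ready(dev);
	if (ret < 0)
		goto free_pdata;	/* work never scheduled */

	ret = example_reset(dev);	/* may schedule the work */
	if (ret < 0)
		goto cancel_work;

	return 0;

cancel_work:
	cancel_work_sync(&pdata->set_multicast);
free_pdata:
	kfree(pdata);
	dev->data[0] = 0;
	return ret;
}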
index 57a5a02..470e1c1 100644 (file)
@@ -74,6 +74,23 @@ MODULE_PARM_DESC (msg_level, "Override default message level");
 
 /*-------------------------------------------------------------------------*/
 
+static const char * const usbnet_event_names[] = {
+       [EVENT_TX_HALT]            = "EVENT_TX_HALT",
+       [EVENT_RX_HALT]            = "EVENT_RX_HALT",
+       [EVENT_RX_MEMORY]          = "EVENT_RX_MEMORY",
+       [EVENT_STS_SPLIT]          = "EVENT_STS_SPLIT",
+       [EVENT_LINK_RESET]         = "EVENT_LINK_RESET",
+       [EVENT_RX_PAUSED]          = "EVENT_RX_PAUSED",
+       [EVENT_DEV_ASLEEP]         = "EVENT_DEV_ASLEEP",
+       [EVENT_DEV_OPEN]           = "EVENT_DEV_OPEN",
+       [EVENT_DEVICE_REPORT_IDLE] = "EVENT_DEVICE_REPORT_IDLE",
+       [EVENT_NO_RUNTIME_PM]      = "EVENT_NO_RUNTIME_PM",
+       [EVENT_RX_KILL]            = "EVENT_RX_KILL",
+       [EVENT_LINK_CHANGE]        = "EVENT_LINK_CHANGE",
+       [EVENT_SET_RX_MODE]        = "EVENT_SET_RX_MODE",
+       [EVENT_NO_IP_ALIGN]        = "EVENT_NO_IP_ALIGN",
+};
+
 /* handles CDC Ethernet and many other network "bulk data" interfaces */
 int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
 {
@@ -452,9 +469,9 @@ void usbnet_defer_kevent (struct usbnet *dev, int work)
 {
        set_bit (work, &dev->flags);
        if (!schedule_work (&dev->kevent))
-               netdev_dbg(dev->net, "kevent %d may have been dropped\n", work);
+               netdev_dbg(dev->net, "kevent %s may have been dropped\n", usbnet_event_names[work]);
        else
-               netdev_dbg(dev->net, "kevent %d scheduled\n", work);
+               netdev_dbg(dev->net, "kevent %s scheduled\n", usbnet_event_names[work]);
 }
 EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
 
index 0416a7e..b0b8145 100644 (file)
@@ -2847,8 +2847,8 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
                        ctx[rxq2vq(i)] = true;
        }
 
-       ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
-                                        names, ctx, NULL);
+       ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
+                                 names, ctx, NULL);
        if (ret)
                goto err_find;
 
index 07eaef5..452822f 100644 (file)
@@ -1183,9 +1183,6 @@ static int vrf_dev_init(struct net_device *dev)
 
        dev->flags = IFF_MASTER | IFF_NOARP;
 
-       /* MTU is irrelevant for VRF device; set to 64k similar to lo */
-       dev->mtu = 64 * 1024;
-
        /* similarly, oper state is irrelevant; set to up to avoid confusion */
        dev->operstate = IF_OPER_UP;
        netdev_lockdep_set_classes(dev);
@@ -1685,7 +1682,8 @@ static void vrf_setup(struct net_device *dev)
         * which breaks networking.
         */
        dev->min_mtu = IPV6_MIN_MTU;
-       dev->max_mtu = ETH_MAX_MTU;
+       dev->max_mtu = IP6_MAX_MTU;
+       dev->mtu = dev->max_mtu;
 }
 
 static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
index 7e431e5..059c2f7 100644 (file)
@@ -92,7 +92,7 @@ static card_t **new_card = &first_card;
 #define phy_node(port)            (0)
 #define winsize(card)             (C101_WINDOW_SIZE)
 #define win0base(card)            ((card)->win0base)
-#define winbase(card)             ((card)->win0base + 0x2000)
+#define winbase(card)             ((card)->win0base + 0x2000)
 #define get_port(card, port)      (card)
 static void sca_msci_intr(port_t *port);
 
@@ -307,7 +307,7 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
        }
 
        card = kzalloc(sizeof(card_t), GFP_KERNEL);
-       if (card == NULL)
+       if (!card)
                return -ENOBUFS;
 
        card->dev = alloc_hdlcdev(card);
@@ -381,7 +381,7 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
 
 static int __init c101_init(void)
 {
-       if (hw == NULL) {
+       if (!hw) {
 #ifdef MODULE
                pr_info("no card initialized\n");
 #endif
@@ -416,6 +416,7 @@ static void __exit c101_cleanup(void)
 
        while (card) {
                card_t *ptr = card;
+
                card = card->next_card;
                unregister_hdlc_device(port_to_dev(ptr));
                c101_destroy_card(ptr);
index 6c05c4c..fd61a7c 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/*
- *     Comtrol SV11 card driver
+/*     Comtrol SV11 card driver
  *
  *     This is a slightly odd Z85230 synchronous driver. All you need to
  *     know basically is
@@ -9,7 +8,7 @@
  *
  *     It supports DMA using two DMA channels in SYNC mode. The driver doesn't
  *     use these facilities
- *     
+ *
  *     The control port is at io+1, the data at io+3 and turning off the DMA
  *     is done by writing 0 to io+4
  *
 
 static int dma;
 
-/*
- *     Network driver support routines
+/*     Network driver support routines
  */
 
-static inline struct z8530_dev* dev_to_sv(struct net_device *dev)
+static inline struct z8530_dev *dev_to_sv(struct net_device *dev)
 {
        return (struct z8530_dev *)dev_to_hdlc(dev)->priv;
 }
 
-/*
- *     Frame receive. Simple for our card as we do HDLC and there
+/*     Frame receive. Simple for our card as we do HDLC and there
  *     is no funny garbage involved
  */
 
@@ -65,15 +62,13 @@ static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
        skb->protocol = hdlc_type_trans(skb, c->netdevice);
        skb_reset_mac_header(skb);
        skb->dev = c->netdevice;
-       /*
-        *      Send it to the PPP layer. We don't have time to process
+       /*      Send it to the PPP layer. We don't have time to process
         *      it right now.
         */
        netif_rx(skb);
 }
 
-/*
- *     We've been placed in the UP state
+/*     We've been placed in the UP state
  */
 
 static int hostess_open(struct net_device *d)
@@ -81,19 +76,18 @@ static int hostess_open(struct net_device *d)
        struct z8530_dev *sv11 = dev_to_sv(d);
        int err = -1;
 
-       /*
-        *      Link layer up
+       /*      Link layer up
         */
        switch (dma) {
-               case 0:
-                       err = z8530_sync_open(d, &sv11->chanA);
-                       break;
-               case 1:
-                       err = z8530_sync_dma_open(d, &sv11->chanA);
-                       break;
-               case 2:
-                       err = z8530_sync_txdma_open(d, &sv11->chanA);
-                       break;
+       case 0:
+               err = z8530_sync_open(d, &sv11->chanA);
+               break;
+       case 1:
+               err = z8530_sync_dma_open(d, &sv11->chanA);
+               break;
+       case 2:
+               err = z8530_sync_txdma_open(d, &sv11->chanA);
+               break;
        }
 
        if (err)
@@ -102,15 +96,15 @@ static int hostess_open(struct net_device *d)
        err = hdlc_open(d);
        if (err) {
                switch (dma) {
-                       case 0:
-                               z8530_sync_close(d, &sv11->chanA);
-                               break;
-                       case 1:
-                               z8530_sync_dma_close(d, &sv11->chanA);
-                               break;
-                       case 2:
-                               z8530_sync_txdma_close(d, &sv11->chanA);
-                               break;
+               case 0:
+                       z8530_sync_close(d, &sv11->chanA);
+                       break;
+               case 1:
+                       z8530_sync_dma_close(d, &sv11->chanA);
+                       break;
+               case 2:
+                       z8530_sync_txdma_close(d, &sv11->chanA);
+                       break;
                }
                return err;
        }
@@ -127,8 +121,7 @@ static int hostess_open(struct net_device *d)
 static int hostess_close(struct net_device *d)
 {
        struct z8530_dev *sv11 = dev_to_sv(d);
-       /*
-        *      Discard new frames
+       /*      Discard new frames
         */
        sv11->chanA.rx_function = z8530_null_rx;
 
@@ -136,32 +129,29 @@ static int hostess_close(struct net_device *d)
        netif_stop_queue(d);
 
        switch (dma) {
-               case 0:
-                       z8530_sync_close(d, &sv11->chanA);
-                       break;
-               case 1:
-                       z8530_sync_dma_close(d, &sv11->chanA);
-                       break;
-               case 2:
-                       z8530_sync_txdma_close(d, &sv11->chanA);
-                       break;
+       case 0:
+               z8530_sync_close(d, &sv11->chanA);
+               break;
+       case 1:
+               z8530_sync_dma_close(d, &sv11->chanA);
+               break;
+       case 2:
+               z8530_sync_txdma_close(d, &sv11->chanA);
+               break;
        }
        return 0;
 }
 
 static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
 {
-       /* struct z8530_dev *sv11=dev_to_sv(d);
-          z8530_ioctl(d,&sv11->chanA,ifr,cmd) */
        return hdlc_ioctl(d, ifr, cmd);
 }
 
-/*
- *     Passed network frames, fire them downwind.
+/*     Passed network frames, fire them downwind.
  */
 
 static netdev_tx_t hostess_queue_xmit(struct sk_buff *skb,
-                                           struct net_device *d)
+                                     struct net_device *d)
 {
        return z8530_queue_xmit(&dev_to_sv(d)->chanA, skb);
 }
@@ -174,8 +164,7 @@ static int hostess_attach(struct net_device *dev, unsigned short encoding,
        return -EINVAL;
 }
 
-/*
- *     Description block for a Comtrol Hostess SV11 card
+/*     Description block for a Comtrol Hostess SV11 card
  */
 
 static const struct net_device_ops hostess_ops = {
@@ -189,8 +178,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
 {
        struct z8530_dev *sv;
        struct net_device *netdev;
-       /*
-        *      Get the needed I/O space
+       /*      Get the needed I/O space
         */
 
        if (!request_region(iobase, 8, "Comtrol SV11")) {
@@ -202,8 +190,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
        if (!sv)
                goto err_kzalloc;
 
-       /*
-        *      Stuff in the I/O addressing
+       /*      Stuff in the I/O addressing
         */
 
        sv->active = 0;
@@ -218,7 +205,8 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
        outb(0, iobase + 4);            /* DMA off */
 
        /* We want a fast IRQ for this device. Actually we'd like an even faster
-          IRQ ;) - This is one driver RtLinux is made for */
+        * IRQ ;) - This is one driver RtLinux is made for
+        */
 
        if (request_irq(irq, z8530_interrupt, 0,
                        "Hostess SV11", sv) < 0) {
@@ -232,8 +220,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
        sv->chanB.dev = sv;
 
        if (dma) {
-               /*
-                *      You can have DMA off or 1 and 3 thats the lot
+               /*      You can have DMA off or 1 and 3; that's the lot
                 *      on the Comtrol.
                 */
                sv->chanA.txdma = 3;
@@ -248,11 +235,11 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
        }
 
        /* Kill our private IRQ line; the hostess can end up chattering
-          until the configuration is set */
+        * until the configuration is set
+        */
        disable_irq(irq);
 
-       /*
-        *      Begin normal initialise
+       /*      Begin normal initialise
         */
 
        if (z8530_init(sv)) {
@@ -268,8 +255,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
 
        enable_irq(irq);
 
-       /*
-        *      Now we can take the IRQ
+       /*      Now we can take the IRQ
         */
 
        sv->chanA.netdevice = netdev = alloc_hdlcdev(sv);
@@ -340,7 +326,8 @@ static struct z8530_dev *sv11_unit;
 
 int init_module(void)
 {
-       if ((sv11_unit = sv11_init(io, irq)) == NULL)
+       sv11_unit = sv11_init(io, irq);
+       if (!sv11_unit)
                return -ENODEV;
        return 0;
 }
index f42bf2c..b8a4bbf 100644 (file)
@@ -2795,7 +2795,7 @@ void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
        switch (ar->scan.state) {
        case ATH10K_SCAN_IDLE:
        case ATH10K_SCAN_STARTING:
-               ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
+               ath10k_dbg(ar, ATH10K_DBG_WMI, "received chan info event without a scan request, ignoring\n");
                goto exit;
        case ATH10K_SCAN_RUNNING:
        case ATH10K_SCAN_ABORTING:
index eb52332..e9b3689 100644 (file)
@@ -1314,10 +1314,16 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
 
        arg->he_flag = true;
 
-       memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info,
-              sizeof(arg->peer_he_cap_macinfo));
-       memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info,
-              sizeof(arg->peer_he_cap_phyinfo));
+       memcpy_and_pad(&arg->peer_he_cap_macinfo,
+                      sizeof(arg->peer_he_cap_macinfo),
+                      he_cap->he_cap_elem.mac_cap_info,
+                      sizeof(he_cap->he_cap_elem.mac_cap_info),
+                      0);
+       memcpy_and_pad(&arg->peer_he_cap_phyinfo,
+                      sizeof(arg->peer_he_cap_phyinfo),
+                      he_cap->he_cap_elem.phy_cap_info,
+                      sizeof(he_cap->he_cap_elem.phy_cap_info),
+                      0);
        arg->peer_he_ops = vif->bss_conf.he_oper.params;
 
        /* the top most byte is used to indicate BSS color info */
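
Aside: memcpy_and_pad() (include/linux/string.h) is what makes the size
mismatch between the peer-assoc argument buffers and the HE capability
element safe here. A behavioral sketch, not the kernel's exact
implementation:

    /* Copy up to dest_len bytes from src; fill any remainder of dest
     * with the pad byte, so dest is fully initialized even when
     * count < dest_len.
     */
    static void memcpy_and_pad_sketch(void *dest, size_t dest_len,
                                      const void *src, size_t count, int pad)
    {
            if (dest_len > count) {
                    memcpy(dest, src, count);
                    memset((char *)dest + count, pad, dest_len - count);
            } else {
                    memcpy(dest, src, dest_len);
            }
    }
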
index f8f6b20..646ad79 100644 (file)
@@ -41,7 +41,7 @@
 static const struct pci_device_id ath11k_pci_id_table[] = {
        { PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
        { PCI_VDEVICE(QCOM, WCN6855_DEVICE_ID) },
-       /* TODO: add QCN9074_DEVICE_ID) once firmware issues are resolved */
+       { PCI_VDEVICE(QCOM, QCN9074_DEVICE_ID) },
        {0}
 };
 
index 97c3a53..1398315 100644 (file)
@@ -2654,7 +2654,7 @@ static void ath9k_unassign_vif_chanctx(struct ieee80211_hw *hw,
 
 static void ath9k_mgd_prepare_tx(struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif,
-                                u16 duration)
+                                struct ieee80211_prep_tx_info *info)
 {
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
index cf8e52c..0e3be17 100644 (file)
@@ -445,22 +445,12 @@ out:
        return ret;
 }
 
-static void init_hal_msg(struct wcn36xx_hal_msg_header *hdr,
-                        enum wcn36xx_hal_host_msg_type msg_type,
-                        size_t msg_size)
-{
-       memset(hdr, 0, msg_size + sizeof(*hdr));
-       hdr->msg_type = msg_type;
-       hdr->msg_version = WCN36XX_HAL_MSG_VERSION0;
-       hdr->len = msg_size + sizeof(*hdr);
-}
-
 #define __INIT_HAL_MSG(msg_body, type, version) \
        do {                                                            \
-               memset(&msg_body, 0, sizeof(msg_body));                 \
-               msg_body.header.msg_type = type;                        \
-               msg_body.header.msg_version = version;                  \
-               msg_body.header.len = sizeof(msg_body);                 \
+               memset(&(msg_body), 0, sizeof(msg_body));               \
+               (msg_body).header.msg_type = type;                      \
+               (msg_body).header.msg_version = version;                \
+               (msg_body).header.len = sizeof(msg_body);               \
        } while (0)                                                     \
 
 #define INIT_HAL_MSG(msg_body, type)   \
@@ -2729,8 +2719,7 @@ int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
 
        msg_body = (struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *)
                   wcn->hal_buf;
-       init_hal_msg(&msg_body->header, WCN36XX_HAL_8023_MULTICAST_LIST_REQ,
-                    sizeof(msg_body->mc_addr_list));
+       INIT_HAL_MSG(*msg_body, WCN36XX_HAL_8023_MULTICAST_LIST_REQ);
 
        /* An empty list means all mc traffic will be received */
        if (fp)
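
The added parentheses are classic macro hygiene: the new call site passes
a dereference (INIT_HAL_MSG(*msg_body, ...)), and '.' binds tighter than
unary '*'. A minimal illustration with made-up types (not the wcn36xx
structures):

    struct hdr { int msg_type; };
    struct msg { struct hdr header; };

    #define BAD_INIT(m, t)  do { m.header.msg_type = (t); } while (0)
    #define GOOD_INIT(m, t) do { (m).header.msg_type = (t); } while (0)

    static void demo(struct msg *p)
    {
            /* BAD_INIT(*p, 1) would expand to "*p.header.msg_type = 1",
             * parsed as "*(p.header.msg_type) = 1" -- a compile error.
             */
            GOOD_INIT(*p, 1);       /* expands to "(*p).header.msg_type = 1" */
    }
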
index 65fb038..cedba56 100644 (file)
@@ -2895,8 +2895,13 @@ brcmf_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *ndev,
                                             &cfg->assoclist,
                                             sizeof(cfg->assoclist));
                if (err) {
-                       bphy_err(drvr, "BRCMF_C_GET_ASSOCLIST unsupported, err=%d\n",
-                                err);
+                       /* GET_ASSOCLIST unsupported by firmware of older chips */
+                       if (err == -EBADE)
+                               bphy_info_once(drvr, "BRCMF_C_GET_ASSOCLIST unsupported\n");
+                       else
+                               bphy_err(drvr, "BRCMF_C_GET_ASSOCLIST failed, err=%d\n",
+                                        err);
+
                        cfg->assoclist.count = 0;
                        return -EOPNOTSUPP;
                }
@@ -6851,7 +6856,12 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
 
        err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
        if (err) {
-               bphy_err(drvr, "rxchain error (%d)\n", err);
+               /* rxchain unsupported by firmware of older chips */
+               if (err == -EBADE)
+                       bphy_info_once(drvr, "rxchain unsupported\n");
+               else
+                       bphy_err(drvr, "rxchain error (%d)\n", err);
+
                nchain = 1;
        } else {
                for (nchain = 0; rxchain; nchain++)
index cee1682..db5f853 100644 (file)
@@ -188,9 +188,14 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
        /* Finally, pick up the PROMISC flag */
        cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
        err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
-       if (err < 0)
-               bphy_err(drvr, "Setting BRCMF_C_SET_PROMISC failed, %d\n",
-                        err);
+       if (err < 0) {
+               /* PROMISC unsupported by firmware of older chips */
+               if (err == -EBADE)
+                       bphy_info_once(drvr, "BRCMF_C_SET_PROMISC unsupported\n");
+               else
+                       bphy_err(drvr, "Setting BRCMF_C_SET_PROMISC failed, err=%d\n",
+                                err);
+       }
        brcmf_configure_arp_nd_offload(ifp, !cmd_value);
 }
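
All three brcmfmac hunks in this series follow the same pattern: firmware
on older chips answers unsupported iovars/commands with an error the bus
layer surfaces as -EBADE, so the driver logs it once at info level and
degrades gracefully instead of spamming error-level messages. Sketched
with a hypothetical helper (brcmf_fil_iovar_int_get() is the real
accessor):

    static int query_rxchain(struct brcmf_if *ifp, u32 *nchain)
    {
            int err = brcmf_fil_iovar_int_get(ifp, "rxchain", nchain);

            if (err == -EBADE) {    /* firmware lacks this iovar */
                    *nchain = 1;    /* conservative fallback */
                    return 0;
            }
            return err;             /* genuine failure */
    }
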
 
index 44ba6f3..9bb5f70 100644 (file)
@@ -60,6 +60,10 @@ void __brcmf_err(struct brcmf_bus *bus, const char *func, const char *fmt, ...);
                                  ##__VA_ARGS__);                       \
        } while (0)
 
+#define bphy_info_once(drvr, fmt, ...)                                 \
+       wiphy_info_once((drvr)->wiphy, "%s: " fmt, __func__,            \
+                       ##__VA_ARGS__)
+
 #if defined(DEBUG) || defined(CONFIG_BRCM_TRACING)
 
 /* For debug/tracing purposes treat info messages as errors */
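
wiphy_info_once(), which this macro wraps, follows the usual printk_once
idiom: a static flag at the expansion site suppresses repeats. Reduced
sketch of that idiom (the real macro is built on dev_info_once()):

    #define info_once_sketch(fmt, ...)                              \
    do {                                                            \
            static bool __printed;                                  \
            if (!__printed) {                                       \
                    __printed = true;                               \
                    printk(KERN_INFO fmt, ##__VA_ARGS__);           \
            }                                                       \
    } while (0)
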
index a755426..2f7bc3a 100644 (file)
 #include "common.h"
 #include "of.h"
 
+static int brcmf_of_get_country_codes(struct device *dev,
+                                     struct brcmf_mp_device *settings)
+{
+       struct device_node *np = dev->of_node;
+       struct brcmfmac_pd_cc_entry *cce;
+       struct brcmfmac_pd_cc *cc;
+       int count;
+       int i;
+
+       count = of_property_count_strings(np, "brcm,ccode-map");
+       if (count < 0) {
+               /* The property is optional, so return success if it doesn't
+                * exist. Otherwise propagate the error code.
+                */
+               return (count == -EINVAL) ? 0 : count;
+       }
+
+       cc = devm_kzalloc(dev, sizeof(*cc) + count * sizeof(*cce), GFP_KERNEL);
+       if (!cc)
+               return -ENOMEM;
+
+       cc->table_size = count;
+
+       for (i = 0; i < count; i++) {
+               const char *map;
+
+               cce = &cc->table[i];
+
+               if (of_property_read_string_index(np, "brcm,ccode-map",
+                                                 i, &map))
+                       continue;
+
+               /* String format e.g. US-Q2-86 */
+               if (sscanf(map, "%2c-%2c-%d", cce->iso3166, cce->cc,
+                          &cce->rev) != 3)
+                       brcmf_err("failed to read country map %s\n", map);
+               else
+                       brcmf_dbg(INFO, "%s-%s-%d\n", cce->iso3166, cce->cc,
+                                 cce->rev);
+       }
+
+       settings->country_codes = cc;
+
+       return 0;
+}
+
 void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
                    struct brcmf_mp_device *settings)
 {
        struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio;
        struct device_node *root, *np = dev->of_node;
        int irq;
+       int err;
        u32 irqf;
        u32 val;
 
@@ -43,8 +90,14 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
                of_node_put(root);
        }
 
-       if (!np || bus_type != BRCMF_BUSTYPE_SDIO ||
-           !of_device_is_compatible(np, "brcm,bcm4329-fmac"))
+       if (!np || !of_device_is_compatible(np, "brcm,bcm4329-fmac"))
+               return;
+
+       err = brcmf_of_get_country_codes(dev, settings);
+       if (err)
+               brcmf_err("failed to get OF country code map (err=%d)\n", err);
+
+       if (bus_type != BRCMF_BUSTYPE_SDIO)
                return;
 
        if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
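
Each "brcm,ccode-map" string has the shape <ISO3166>-<firmware cc>-<rev>,
e.g. "US-Q2-86". Note that "%2c" reads exactly two characters and does
not NUL-terminate, which is fine because the destination fields are
fixed-size character arrays. A standalone userspace illustration of the
same parse:

    #include <stdio.h>

    int main(void)
    {
            char iso3166[2], cc[2];
            int rev;

            if (sscanf("US-Q2-86", "%2c-%2c-%d", iso3166, cc, &rev) == 3)
                    printf("%.2s -> %.2s rev %d\n", iso3166, cc, rev);
            return 0;
    }
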
index 14b0db2..d86918d 100644 (file)
@@ -16,9 +16,10 @@ iwlwifi-objs         += iwl-trans.o
 iwlwifi-objs           += queue/tx.o
 
 iwlwifi-objs           += fw/img.o fw/notif-wait.o
-iwlwifi-objs           += fw/dbg.o fw/pnvm.o
+iwlwifi-objs           += fw/dbg.o fw/pnvm.o fw/dump.o
 iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
 iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
+iwlwifi-$(CONFIG_EFI)  += fw/uefi.o
 iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
 
 iwlwifi-objs += $(iwlwifi-m)
index c2315de..7f1faa9 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 #include <linux/module.h>
 #include <linux/stringify.h>
@@ -9,7 +9,7 @@
 #include "iwl-prph.h"
 
 /* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX        63
+#define IWL_22000_UCODE_API_MAX        64
 
 /* Lowest firmware API version supported */
 #define IWL_22000_UCODE_API_MIN        39
@@ -47,6 +47,7 @@
 #define IWL_MA_A_GF_A_FW_PRE           "iwlwifi-ma-a0-gf-a0-"
 #define IWL_MA_A_GF4_A_FW_PRE          "iwlwifi-ma-a0-gf4-a0-"
 #define IWL_MA_A_MR_A_FW_PRE           "iwlwifi-ma-a0-mr-a0-"
+#define IWL_MA_A_FM_A_FW_PRE           "iwlwifi-ma-a0-fm-a0-"
 #define IWL_SNJ_A_MR_A_FW_PRE          "iwlwifi-SoSnj-a0-mr-a0-"
 #define IWL_BZ_A_HR_B_FW_PRE           "iwlwifi-bz-a0-hr-b0-"
 #define IWL_BZ_A_GF_A_FW_PRE           "iwlwifi-bz-a0-gf-a0-"
@@ -93,6 +94,8 @@
        IWL_MA_A_GF4_A_FW_PRE __stringify(api) ".ucode"
 #define IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(api) \
        IWL_MA_A_MR_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_MA_A_FM_A_FW_MODULE_FIRMWARE(api)          \
+       IWL_MA_A_FM_A_FW_PRE __stringify(api) ".ucode"
 #define IWL_SNJ_A_MR_A_MODULE_FIRMWARE(api) \
        IWL_SNJ_A_MR_A_FW_PRE __stringify(api) ".ucode"
 #define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \
@@ -389,6 +392,7 @@ const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
 const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
 const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
 const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz";
+const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz";
 const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz";
 
 const char iwl_ax200_killer_1650w_name[] =
@@ -724,6 +728,13 @@ const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = {
        .num_rbds = IWL_NUM_RBDS_AX210_HE,
 };
 
+const struct iwl_cfg iwl_cfg_ma_a0_fm_a0 = {
+       .fw_name_pre = IWL_MA_A_FM_A_FW_PRE,
+       .uhb_supported = true,
+       IWL_DEVICE_AX210,
+       .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
 const struct iwl_cfg iwl_cfg_snj_a0_mr_a0 = {
        .fw_name_pre = IWL_SNJ_A_MR_A_FW_PRE,
        .uhb_supported = true,
@@ -797,6 +808,7 @@ MODULE_FIRMWARE(IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_MA_A_FM_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_SNJ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
index df12973..871533b 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 #include <linux/module.h>
 #include <linux/stringify.h>
@@ -171,8 +171,12 @@ const char iwl9260_killer_1550_name[] =
        "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW) 160MHz";
 const char iwl9560_killer_1550i_name[] =
        "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)";
+const char iwl9560_killer_1550i_160_name[] =
+       "Killer(R) Wireless-AC 1550i Wireless Network Adapter (9560NGW) 160MHz";
 const char iwl9560_killer_1550s_name[] =
        "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)";
+const char iwl9560_killer_1550s_160_name[] =
+       "Killer(R) Wireless-AC 1550s Wireless Network Adapter (9560D2W) 160MHz";
 
 const struct iwl_cfg iwl9260_2ac_cfg = {
        .fw_name_pre = IWL9260_FW_PRE,
index e31bba8..34933f1 100644 (file)
@@ -163,6 +163,27 @@ int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
 }
 IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
 
+/*
+ * Evaluate a DSM with no arguments and a u32 return value.
+ */
+int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
+                        const guid_t *guid, u32 *value)
+{
+       int ret;
+       u64 val;
+
+       ret = iwl_acpi_get_dsm_integer(dev, rev, func,
+                                      guid, &val, sizeof(u32));
+
+       if (ret < 0)
+               return ret;
+
+       /* cast val (u64) to u32 */
+       *value = (u32)val;
+       return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u32);
+
 union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
                                         union acpi_object *data,
                                         int data_size, int *tbl_rev)
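
A hypothetical caller, mirroring how iwl_acpi_get_dsm_u8() is used later
in this file — act on the value only when evaluation succeeded.
DSM_FUNC_ENABLE_UNII4_CHAN is the function ID added in this series;
whether it is ultimately read as a u8 or a u32 is an assumption here:

    static u32 get_unii4_bitmap(struct iwl_fw_runtime *fwrt)
    {
            u32 value = 0;

            if (iwl_acpi_get_dsm_u32(fwrt->dev, 0, DSM_FUNC_ENABLE_UNII4_CHAN,
                                     &iwl_guid, &value))
                    return 0;       /* DSM absent or malformed: no override */

            return value;
    }
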
@@ -696,68 +717,37 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
 }
 IWL_EXPORT_SYMBOL(iwl_sar_geo_init);
 
-static u32 iwl_acpi_eval_dsm_func(struct device *dev, enum iwl_dsm_funcs_rev_0 eval_func)
-{
-       union acpi_object *obj;
-       u32 ret;
-
-       obj = iwl_acpi_get_dsm_object(dev, 0,
-                                     eval_func, NULL,
-                                     &iwl_guid);
-
-       if (IS_ERR(obj)) {
-               IWL_DEBUG_DEV_RADIO(dev,
-                                   "ACPI: DSM func '%d': Got Error in obj = %ld\n",
-                                   eval_func,
-                                   PTR_ERR(obj));
-               return 0;
-       }
-
-       if (obj->type != ACPI_TYPE_INTEGER) {
-               IWL_DEBUG_DEV_RADIO(dev,
-                                   "ACPI: DSM func '%d' did not return a valid object, type=%d\n",
-                                   eval_func,
-                                   obj->type);
-               ret = 0;
-               goto out;
-       }
-
-       ret = obj->integer.value;
-       IWL_DEBUG_DEV_RADIO(dev,
-                           "ACPI: DSM method evaluated: func='%d', ret=%d\n",
-                           eval_func,
-                           ret);
-out:
-       ACPI_FREE(obj);
-       return ret;
-}
-
 __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
 {
-       u32 ret;
+       int ret;
+       u8 value;
        __le32 config_bitmap = 0;
 
        /*
         ** Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2'
         */
-       ret = iwl_acpi_eval_dsm_func(fwrt->dev, DSM_FUNC_ENABLE_INDONESIA_5G2);
+       ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
+                                 DSM_FUNC_ENABLE_INDONESIA_5G2,
+                                 &iwl_guid, &value);
 
-       if (ret == DSM_VALUE_INDONESIA_ENABLE)
+       if (!ret && value == DSM_VALUE_INDONESIA_ENABLE)
                config_bitmap |=
                        cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
 
        /*
         ** Evaluate func 'DSM_FUNC_DISABLE_SRD'
         */
-       ret = iwl_acpi_eval_dsm_func(fwrt->dev, DSM_FUNC_DISABLE_SRD);
-
-       if (ret == DSM_VALUE_SRD_PASSIVE)
-               config_bitmap |=
-                       cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
-
-       else if (ret == DSM_VALUE_SRD_DISABLE)
-               config_bitmap |=
-                       cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
+       ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
+                                 DSM_FUNC_DISABLE_SRD,
+                                 &iwl_guid, &value);
+       if (!ret) {
+               if (value == DSM_VALUE_SRD_PASSIVE)
+                       config_bitmap |=
+                               cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
+               else if (value == DSM_VALUE_SRD_DISABLE)
+                       config_bitmap |=
+                               cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
+       }
 
        return config_bitmap;
 }
index d16e6ec..b858e99 100644 (file)
@@ -78,6 +78,7 @@ enum iwl_dsm_funcs_rev_0 {
        DSM_FUNC_DISABLE_SRD = 1,
        DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
        DSM_FUNC_11AX_ENABLEMENT = 6,
+       DSM_FUNC_ENABLE_UNII4_CHAN = 7
 };
 
 enum iwl_dsm_values_srd {
@@ -116,6 +117,9 @@ void *iwl_acpi_get_object(struct device *dev, acpi_string method);
 int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
                        const guid_t *guid, u8 *value);
 
+int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
+                        const guid_t *guid, u32 *value);
+
 union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
                                         union acpi_object *data,
                                         int data_size, int *tbl_rev);
@@ -182,6 +186,12 @@ static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
        return -ENOENT;
 }
 
+static inline int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
+                                      const guid_t *guid, u32 *value)
+{
+       return -ENOENT;
+}
+
 static inline union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
                                                       union acpi_object *data,
                                                       int data_size,
index c625d31..ce060c3 100644 (file)
@@ -535,11 +535,6 @@ enum iwl_legacy_cmds {
        OFFLOADS_QUERY_CMD = 0xd5,
 
        /**
-        * @REMOTE_WAKE_CONFIG_CMD: &struct iwl_wowlan_remote_wake_config
-        */
-       REMOTE_WAKE_CONFIG_CMD = 0xd6,
-
-       /**
         * @D0I3_END_CMD: End D0i3/D3 state, no command data
         */
        D0I3_END_CMD = 0xed,
index 7586390..b2e7ef3 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
  */
@@ -159,6 +159,22 @@ struct iwl_proto_offload_cmd_v3_large {
        struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
 } __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
 
+/**
+ * struct iwl_proto_offload_cmd_v4 - ARP/NS offload configuration
+ * @sta_id: station id
+ * @common: common/IPv4 configuration
+ * @num_valid_ipv6_addrs: number of valid IPv6 addresses
+ * @targ_addrs: target IPv6 addresses
+ * @ns_config: NS offload configurations
+ */
+struct iwl_proto_offload_cmd_v4 {
+       __le32 sta_id;
+       struct iwl_proto_offload_cmd_common common;
+       __le32 num_valid_ipv6_addrs;
+       struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L];
+       struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_4 */
+
 /*
  * WOWLAN_PATTERNS
  */
@@ -302,13 +318,23 @@ struct iwl_wowlan_patterns_cmd {
        /**
         * @n_patterns: number of patterns
         */
-       __le32 n_patterns;
+       u8 n_patterns;
+
+       /**
+        * @sta_id: station id
+        */
+       u8 sta_id;
+
+       /**
+        * @reserved: reserved for alignment
+        */
+       __le16 reserved;
 
        /**
         * @patterns: the patterns, array length in @n_patterns
         */
        struct iwl_wowlan_pattern_v2 patterns[];
-} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_2 */
+} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_3 */
 
 enum iwl_wowlan_wakeup_filters {
        IWL_WOWLAN_WAKEUP_MAGIC_PACKET                  = BIT(0),
@@ -339,9 +365,10 @@ enum iwl_wowlan_flags {
 };
 
 /**
- * struct iwl_wowlan_config_cmd - WoWLAN configuration
+ * struct iwl_wowlan_config_cmd - WoWLAN configuration (versions 5 and 6)
  * @wakeup_filter: filter from &enum iwl_wowlan_wakeup_filters
- * @non_qos_seq: non-QoS sequence counter to use next
+ * @non_qos_seq: non-QoS sequence counter to use next.
+ *               Reserved if the struct has version >= 6.
  * @qos_seq: QoS sequence counters to use next
  * @wowlan_ba_teardown_tids: bitmap of BA sessions to tear down
  * @is_11n_connection: indicates HT connection
@@ -456,6 +483,23 @@ struct iwl_wowlan_kek_kck_material_cmd_v3 {
        __le32  bigtk_cipher;
 } __packed; /* KEK_KCK_MATERIAL_API_S_VER_3 */
 
+struct iwl_wowlan_kek_kck_material_cmd_v4 {
+       __le32  sta_id;
+       u8      kck[IWL_KCK_MAX_SIZE];
+       u8      kek[IWL_KEK_MAX_SIZE];
+       __le16  kck_len;
+       __le16  kek_len;
+       __le64  replay_ctr;
+       __le32  akm;
+       __le32  gtk_cipher;
+       __le32  igtk_cipher;
+       __le32  bigtk_cipher;
+} __packed; /* KEK_KCK_MATERIAL_API_S_VER_4 */
+
+struct iwl_wowlan_get_status_cmd {
+       __le32  sta_id;
+} __packed; /* WOWLAN_GET_STATUSES_CMD_API_S_VER_1 */
+
 #define RF_KILL_INDICATOR_FOR_WOWLAN   0x87
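
The _v4 layouts add an explicit sta_id; drivers pick which structure to
send from the command version the firmware advertises. A hedged sketch,
assuming the existing iwl_fw_lookup_cmd_ver() helper from fw/img.h:

    static size_t kek_kck_cmd_size(const struct iwl_fw *fw)
    {
            u8 ver = iwl_fw_lookup_cmd_ver(fw, LONG_GROUP,
                                           WOWLAN_KEK_KCK_MATERIAL,
                                           IWL_FW_CMD_VER_UNKNOWN);

            if (ver == 4)
                    return sizeof(struct iwl_wowlan_kek_kck_material_cmd_v4);
            return sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3);
    }
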
 
 enum iwl_wowlan_rekey_status {
@@ -604,12 +648,13 @@ struct iwl_wowlan_status_v7 {
 } __packed; /* WOWLAN_STATUSES_API_S_VER_7 */
 
 /**
- * struct iwl_wowlan_status_v9 - WoWLAN status (version 9)
+ * struct iwl_wowlan_status_v9 - WoWLAN status (versions 9 and 10)
  * @gtk: GTK data
  * @igtk: IGTK data
  * @replay_ctr: GTK rekey replay counter
  * @pattern_number: number of the matched pattern
- * @non_qos_seq_ctr: non-QoS sequence counter to use next
+ * @non_qos_seq_ctr: non-QoS sequence counter to use next.
+ *                   Reserved if the struct has version >= 10.
  * @qos_seq_ctr: QoS sequence counters to use next
  * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
  * @num_of_gtk_rekeys: number of GTK rekeys
@@ -638,7 +683,7 @@ struct iwl_wowlan_status_v9 {
        u8 tid_tear_down;
        u8 reserved[3];
        u8 wake_packet[]; /* can be truncated from _length to _bufsize */
-} __packed; /* WOWLAN_STATUSES_API_S_VER_9 */
+} __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_9 */
 
 /**
  * struct iwl_wowlan_status - WoWLAN status
@@ -683,55 +728,6 @@ static inline u8 iwlmvm_wowlan_gtk_idx(struct iwl_wowlan_gtk_status *gtk)
        return gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK;
 }
 
-#define IWL_WOWLAN_TCP_MAX_PACKET_LEN          64
-#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN  128
-#define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS      2048
-
-struct iwl_tcp_packet_info {
-       __le16 tcp_pseudo_header_checksum;
-       __le16 tcp_payload_length;
-} __packed; /* TCP_PACKET_INFO_API_S_VER_2 */
-
-struct iwl_tcp_packet {
-       struct iwl_tcp_packet_info info;
-       u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
-       u8 data[IWL_WOWLAN_TCP_MAX_PACKET_LEN];
-} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */
-
-struct iwl_remote_wake_packet {
-       struct iwl_tcp_packet_info info;
-       u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
-       u8 data[IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN];
-} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */
-
-struct iwl_wowlan_remote_wake_config {
-       __le32 connection_max_time; /* unused */
-       /* TCP_PROTOCOL_CONFIG_API_S_VER_1 */
-       u8 max_syn_retries;
-       u8 max_data_retries;
-       u8 tcp_syn_ack_timeout;
-       u8 tcp_ack_timeout;
-
-       struct iwl_tcp_packet syn_tx;
-       struct iwl_tcp_packet synack_rx;
-       struct iwl_tcp_packet keepalive_ack_rx;
-       struct iwl_tcp_packet fin_tx;
-
-       struct iwl_remote_wake_packet keepalive_tx;
-       struct iwl_remote_wake_packet wake_rx;
-
-       /* REMOTE_WAKE_OFFSET_INFO_API_S_VER_1 */
-       u8 sequence_number_offset;
-       u8 sequence_number_length;
-       u8 token_offset;
-       u8 token_length;
-       /* REMOTE_WAKE_PROTOCOL_PARAMS_API_S_VER_1 */
-       __le32 initial_sequence_number;
-       __le16 keepalive_interval;
-       __le16 num_tokens;
-       u8 tokens[IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS];
-} __packed; /* REMOTE_WAKE_CONFIG_API_S_VER_2 */
-
 /* TODO: NetDetect API */
 
 #endif /* __iwl_fw_api_d3_h__ */
index d299bba..985b0dc 100644 (file)
@@ -64,6 +64,12 @@ enum iwl_data_path_subcmd_ids {
        RX_NO_DATA_NOTIF = 0xF5,
 
        /**
+        * @THERMAL_DUAL_CHAIN_REQUEST: firmware request for SMPS mode,
+        *      &struct iwl_thermal_dual_chain_request
+        */
+       THERMAL_DUAL_CHAIN_REQUEST = 0xF6,
+
+       /**
         * @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
         */
        TLC_MNG_UPDATE_NOTIF = 0xF7,
@@ -169,4 +175,24 @@ struct iwl_datapath_monitor_notif {
        u8 reserved[3];
 } __packed; /* MONITOR_NTF_API_S_VER_1 */
 
+/**
+ * enum iwl_thermal_dual_chain_req_events - firmware SMPS request event
+ * @THERMAL_DUAL_CHAIN_REQ_ENABLE: (re-)enable dual-chain operation
+ *     (subject to other constraints)
+ * @THERMAL_DUAL_CHAIN_REQ_DISABLE: disable dual-chain operation
+ *     (static SMPS)
+ */
+enum iwl_thermal_dual_chain_req_events {
+       THERMAL_DUAL_CHAIN_REQ_ENABLE,
+       THERMAL_DUAL_CHAIN_REQ_DISABLE,
+}; /* THERMAL_DUAL_CHAIN_DISABLE_STATE_API_E_VER_1 */
+
+/**
+ * struct iwl_thermal_dual_chain_request - SMPS request
+ * @event: the type of request, see &enum iwl_thermal_dual_chain_req_events
+ */
+struct iwl_thermal_dual_chain_request {
+       __le32 event;
+} __packed; /* THERMAL_DUAL_CHAIN_DISABLE_REQ_NTFY_API_S_VER_1 */
+
 #endif /* __iwl_fw_api_datapath_h__ */
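
Receive-side handling of the new notification would look roughly like
the sketch below; the handler name and how it is registered are
hypothetical and not part of this hunk:

    static void handle_thermal_dual_chain_req(struct iwl_rx_packet *pkt)
    {
            struct iwl_thermal_dual_chain_request *req = (void *)pkt->data;

            switch (le32_to_cpu(req->event)) {
            case THERMAL_DUAL_CHAIN_REQ_DISABLE:
                    /* thermal pressure: fall back to static SMPS (one chain) */
                    break;
            case THERMAL_DUAL_CHAIN_REQ_ENABLE:
                    /* pressure relieved: dual-chain operation allowed again */
                    break;
            }
    }
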
index 996d5cc..5a2d9a1 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 #ifndef __iwl_fw_dbg_tlv_h__
 #define __iwl_fw_dbg_tlv_h__
@@ -11,6 +11,7 @@
 #define IWL_FW_INI_MAX_NAME                    32
 #define IWL_FW_INI_MAX_CFG_NAME                        64
 #define IWL_FW_INI_DOMAIN_ALWAYS_ON            0
+#define IWL_FW_INI_REGION_V2_MASK              0x0000FFFF
 
 /**
  * struct iwl_fw_ini_hcmd
index dc8f277..cf48c6f 100644 (file)
@@ -453,6 +453,25 @@ struct iwl_lari_config_change_cmd_v3 {
 } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_3 */
 
 /**
+ * struct iwl_lari_config_change_cmd_v4 - change LARI configuration
+ * @config_bitmap: Bitmap of the config commands. Each bit will trigger a
+ *     different predefined FW config operation.
+ * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
+ * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
+ *     per country, one to indicate whether to override and the other to
+ *     indicate the value to use.
+ * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs. There are two bits
+ *     per country, one to indicate whether to override and the other to
+ *     indicate allow/disallow unii4 channels.
+ */
+struct iwl_lari_config_change_cmd_v4 {
+       __le32 config_bitmap;
+       __le32 oem_uhb_allow_bitmap;
+       __le32 oem_11ax_allow_bitmap;
+       __le32 oem_unii4_allow_bitmap;
+} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_4 */
+
+/**
  * struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
  * @status: PNVM image loading status
  */
index cc4e18c..df7c55e 100644 (file)
@@ -1933,6 +1933,13 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
        u32 num_of_ranges, i, size;
        void *range;
 
+       /*
+        * The higher part of the ID in version 2 is irrelevant for
+        * us, so mask it out.
+        */
+       if (le32_to_cpu(reg->hdr.version) == 2)
+               id &= IWL_FW_INI_REGION_V2_MASK;
+
        if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr ||
            !ops->fill_range)
                return 0;
@@ -1957,7 +1964,7 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
        num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data);
 
        header = (void *)tlv->data;
-       header->region_id = reg->id;
+       header->region_id = cpu_to_le32(id);
        header->num_of_ranges = cpu_to_le32(num_of_ranges);
        header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME);
        memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME);
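
Worked example of the new masking: with IWL_FW_INI_REGION_V2_MASK equal
to 0x0000FFFF, only the low 16 bits identify the region and the high
bits of a version-2 ID are discarded:

    static u32 region_v2_id(u32 raw_id)
    {
            return raw_id & IWL_FW_INI_REGION_V2_MASK;  /* 0x00030005 -> 0x0005 */
    }
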
@@ -2752,44 +2759,6 @@ void iwl_fw_dbg_stop_sync(struct iwl_fw_runtime *fwrt)
 }
 IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_sync);
 
-#define FSEQ_REG(x) { .addr = (x), .str = #x, }
-
-void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt)
-{
-       struct iwl_trans *trans = fwrt->trans;
-       int i;
-       struct {
-               u32 addr;
-               const char *str;
-       } fseq_regs[] = {
-               FSEQ_REG(FSEQ_ERROR_CODE),
-               FSEQ_REG(FSEQ_TOP_INIT_VERSION),
-               FSEQ_REG(FSEQ_CNVIO_INIT_VERSION),
-               FSEQ_REG(FSEQ_OTP_VERSION),
-               FSEQ_REG(FSEQ_TOP_CONTENT_VERSION),
-               FSEQ_REG(FSEQ_ALIVE_TOKEN),
-               FSEQ_REG(FSEQ_CNVI_ID),
-               FSEQ_REG(FSEQ_CNVR_ID),
-               FSEQ_REG(CNVI_AUX_MISC_CHIP),
-               FSEQ_REG(CNVR_AUX_MISC_CHIP),
-               FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM),
-               FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR),
-       };
-
-       if (!iwl_trans_grab_nic_access(trans))
-               return;
-
-       IWL_ERR(fwrt, "Fseq Registers:\n");
-
-       for (i = 0; i < ARRAY_SIZE(fseq_regs); i++)
-               IWL_ERR(fwrt, "0x%08X | %s\n",
-                       iwl_read_prph_no_grab(trans, fseq_regs[i].addr),
-                       fseq_regs[i].str);
-
-       iwl_trans_release_nic_access(trans);
-}
-IWL_EXPORT_SYMBOL(iwl_fw_error_print_fseq_regs);
-
 static int iwl_fw_dbg_suspend_resume_hcmd(struct iwl_trans *trans, bool suspend)
 {
        struct iwl_dbg_suspend_resume_cmd cmd = {
index 49fa2f5..c0e84ef 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2018-2019 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2019, 2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
  */
@@ -321,4 +321,6 @@ static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
                fwrt->dump.fw_ver.umac_minor = le32_to_cpu(umac->umac_minor);
        }
 }
+
+void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt);
 #endif  /* __iwl_fw_dbg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
new file mode 100644 (file)
index 0000000..a184220
--- /dev/null
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ */
+#include <linux/devcoredump.h>
+#include "iwl-drv.h"
+#include "runtime.h"
+#include "dbg.h"
+#include "debugfs.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "iwl-csr.h"
+
+/*
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_error_event_table_v1 {
+       u32 valid;              /* (nonzero) valid, (0) log is empty */
+       u32 error_id;           /* type of error */
+       u32 pc;                 /* program counter */
+       u32 blink1;             /* branch link */
+       u32 blink2;             /* branch link */
+       u32 ilink1;             /* interrupt link */
+       u32 ilink2;             /* interrupt link */
+       u32 data1;              /* error-specific data */
+       u32 data2;              /* error-specific data */
+       u32 data3;              /* error-specific data */
+       u32 bcon_time;          /* beacon timer */
+       u32 tsf_low;            /* network timestamp function timer */
+       u32 tsf_hi;             /* network timestamp function timer */
+       u32 gp1;                /* GP1 timer register */
+       u32 gp2;                /* GP2 timer register */
+       u32 gp3;                /* GP3 timer register */
+       u32 ucode_ver;          /* uCode version */
+       u32 hw_ver;             /* HW Silicon version */
+       u32 brd_ver;            /* HW board version */
+       u32 log_pc;             /* log program counter */
+       u32 frame_ptr;          /* frame pointer */
+       u32 stack_ptr;          /* stack pointer */
+       u32 hcmd;               /* last host command header */
+       u32 isr0;               /* isr status register LMPM_NIC_ISR0:
+                                * rxtx_flag */
+       u32 isr1;               /* isr status register LMPM_NIC_ISR1:
+                                * host_flag */
+       u32 isr2;               /* isr status register LMPM_NIC_ISR2:
+                                * enc_flag */
+       u32 isr3;               /* isr status register LMPM_NIC_ISR3:
+                                * time_flag */
+       u32 isr4;               /* isr status register LMPM_NIC_ISR4:
+                                * wico interrupt */
+       u32 isr_pref;           /* isr status register LMPM_NIC_PREF_STAT */
+       u32 wait_event;         /* wait event() caller address */
+       u32 l2p_control;        /* L2pControlField */
+       u32 l2p_duration;       /* L2pDurationField */
+       u32 l2p_mhvalid;        /* L2pMhValidBits */
+       u32 l2p_addr_match;     /* L2pAddrMatchStat */
+       u32 lmpm_pmg_sel;       /* indicate which clocks are turned on
+                                * (LMPM_PMG_SEL) */
+       u32 u_timestamp;        /* date and time of the compilation */
+       u32 flow_handler;       /* FH read/write pointers, RX credit */
+} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
+
+struct iwl_error_event_table {
+       u32 valid;              /* (nonzero) valid, (0) log is empty */
+       u32 error_id;           /* type of error */
+       u32 trm_hw_status0;     /* TRM HW status */
+       u32 trm_hw_status1;     /* TRM HW status */
+       u32 blink2;             /* branch link */
+       u32 ilink1;             /* interrupt link */
+       u32 ilink2;             /* interrupt link */
+       u32 data1;              /* error-specific data */
+       u32 data2;              /* error-specific data */
+       u32 data3;              /* error-specific data */
+       u32 bcon_time;          /* beacon timer */
+       u32 tsf_low;            /* network timestamp function timer */
+       u32 tsf_hi;             /* network timestamp function timer */
+       u32 gp1;                /* GP1 timer register */
+       u32 gp2;                /* GP2 timer register */
+       u32 fw_rev_type;        /* firmware revision type */
+       u32 major;              /* uCode version major */
+       u32 minor;              /* uCode version minor */
+       u32 hw_ver;             /* HW Silicon version */
+       u32 brd_ver;            /* HW board version */
+       u32 log_pc;             /* log program counter */
+       u32 frame_ptr;          /* frame pointer */
+       u32 stack_ptr;          /* stack pointer */
+       u32 hcmd;               /* last host command header */
+       u32 isr0;               /* isr status register LMPM_NIC_ISR0:
+                                * rxtx_flag */
+       u32 isr1;               /* isr status register LMPM_NIC_ISR1:
+                                * host_flag */
+       u32 isr2;               /* isr status register LMPM_NIC_ISR2:
+                                * enc_flag */
+       u32 isr3;               /* isr status register LMPM_NIC_ISR3:
+                                * time_flag */
+       u32 isr4;               /* isr status register LMPM_NIC_ISR4:
+                                * wico interrupt */
+       u32 last_cmd_id;        /* last HCMD id handled by the firmware */
+       u32 wait_event;         /* wait event() caller address */
+       u32 l2p_control;        /* L2pControlField */
+       u32 l2p_duration;       /* L2pDurationField */
+       u32 l2p_mhvalid;        /* L2pMhValidBits */
+       u32 l2p_addr_match;     /* L2pAddrMatchStat */
+       u32 lmpm_pmg_sel;       /* indicate which clocks are turned on
+                                * (LMPM_PMG_SEL) */
+       u32 u_timestamp;        /* date and time of the compilation */
+       u32 flow_handler;       /* FH read/write pointers, RX credit */
+} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
+
+/*
+ * UMAC error struct - relevant starting from family 8000 chip.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_umac_error_event_table {
+       u32 valid;              /* (nonzero) valid, (0) log is empty */
+       u32 error_id;           /* type of error */
+       u32 blink1;             /* branch link */
+       u32 blink2;             /* branch link */
+       u32 ilink1;             /* interrupt link */
+       u32 ilink2;             /* interrupt link */
+       u32 data1;              /* error-specific data */
+       u32 data2;              /* error-specific data */
+       u32 data3;              /* error-specific data */
+       u32 umac_major;
+       u32 umac_minor;
+       u32 frame_pointer;      /* core register 27*/
+       u32 stack_pointer;      /* core register 28 */
+       u32 cmd_header;         /* latest host cmd sent to UMAC */
+       u32 nic_isr_pref;       /* ISR status register */
+} __packed;
+
+#define ERROR_START_OFFSET  (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
+
+static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
+{
+       struct iwl_trans *trans = fwrt->trans;
+       struct iwl_umac_error_event_table table = {};
+       u32 base = fwrt->trans->dbg.umac_error_event_table;
+
+       if (!base &&
+           !(fwrt->trans->dbg.error_event_table_tlv_status &
+             IWL_ERROR_EVENT_TABLE_UMAC))
+               return;
+
+       iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+       if (table.valid)
+               fwrt->dump.umac_err_id = table.error_id;
+
+       if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+               IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+               IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n",
+                       fwrt->trans->status, table.valid);
+       }
+
+       IWL_ERR(fwrt, "0x%08X | %s\n", table.error_id,
+               iwl_fw_lookup_assert_desc(table.error_id));
+       IWL_ERR(fwrt, "0x%08X | umac branchlink1\n", table.blink1);
+       IWL_ERR(fwrt, "0x%08X | umac branchlink2\n", table.blink2);
+       IWL_ERR(fwrt, "0x%08X | umac interruptlink1\n", table.ilink1);
+       IWL_ERR(fwrt, "0x%08X | umac interruptlink2\n", table.ilink2);
+       IWL_ERR(fwrt, "0x%08X | umac data1\n", table.data1);
+       IWL_ERR(fwrt, "0x%08X | umac data2\n", table.data2);
+       IWL_ERR(fwrt, "0x%08X | umac data3\n", table.data3);
+       IWL_ERR(fwrt, "0x%08X | umac major\n", table.umac_major);
+       IWL_ERR(fwrt, "0x%08X | umac minor\n", table.umac_minor);
+       IWL_ERR(fwrt, "0x%08X | frame pointer\n", table.frame_pointer);
+       IWL_ERR(fwrt, "0x%08X | stack pointer\n", table.stack_pointer);
+       IWL_ERR(fwrt, "0x%08X | last host cmd\n", table.cmd_header);
+       IWL_ERR(fwrt, "0x%08X | isr status reg\n", table.nic_isr_pref);
+}
+
+static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_num)
+{
+       struct iwl_trans *trans = fwrt->trans;
+       struct iwl_error_event_table table = {};
+       u32 val, base = fwrt->trans->dbg.lmac_error_event_table[lmac_num];
+
+       if (fwrt->cur_fw_img == IWL_UCODE_INIT) {
+               if (!base)
+                       base = fwrt->fw->init_errlog_ptr;
+       } else {
+               if (!base)
+                       base = fwrt->fw->inst_errlog_ptr;
+       }
+
+       if (base < 0x400000) {
+               IWL_ERR(fwrt,
+                       "Not valid error log pointer 0x%08X for %s uCode\n",
+                       base,
+                       (fwrt->cur_fw_img == IWL_UCODE_INIT)
+                       ? "Init" : "RT");
+               return;
+       }
+
+       /* check if there is a HW error */
+       val = iwl_trans_read_mem32(trans, base);
+       if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
+               int err;
+
+               IWL_ERR(trans, "HW error, resetting before reading\n");
+
+               /* reset the device */
+               iwl_trans_sw_reset(trans);
+
+               err = iwl_finish_nic_init(trans, trans->trans_cfg);
+               if (err)
+                       return;
+       }
+
+       iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+       if (table.valid)
+               fwrt->dump.lmac_err_id[lmac_num] = table.error_id;
+
+       if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+               IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+               IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n",
+                       fwrt->trans->status, table.valid);
+       }
+
+       /* Do not change this output - scripts rely on it */
+
+       IWL_ERR(fwrt, "Loaded firmware version: %s\n", fwrt->fw->fw_version);
+
+       IWL_ERR(fwrt, "0x%08X | %-28s\n", table.error_id,
+               iwl_fw_lookup_assert_desc(table.error_id));
+       IWL_ERR(fwrt, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
+       IWL_ERR(fwrt, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
+       IWL_ERR(fwrt, "0x%08X | branchlink2\n", table.blink2);
+       IWL_ERR(fwrt, "0x%08X | interruptlink1\n", table.ilink1);
+       IWL_ERR(fwrt, "0x%08X | interruptlink2\n", table.ilink2);
+       IWL_ERR(fwrt, "0x%08X | data1\n", table.data1);
+       IWL_ERR(fwrt, "0x%08X | data2\n", table.data2);
+       IWL_ERR(fwrt, "0x%08X | data3\n", table.data3);
+       IWL_ERR(fwrt, "0x%08X | beacon time\n", table.bcon_time);
+       IWL_ERR(fwrt, "0x%08X | tsf low\n", table.tsf_low);
+       IWL_ERR(fwrt, "0x%08X | tsf hi\n", table.tsf_hi);
+       IWL_ERR(fwrt, "0x%08X | time gp1\n", table.gp1);
+       IWL_ERR(fwrt, "0x%08X | time gp2\n", table.gp2);
+       IWL_ERR(fwrt, "0x%08X | uCode revision type\n", table.fw_rev_type);
+       IWL_ERR(fwrt, "0x%08X | uCode version major\n", table.major);
+       IWL_ERR(fwrt, "0x%08X | uCode version minor\n", table.minor);
+       IWL_ERR(fwrt, "0x%08X | hw version\n", table.hw_ver);
+       IWL_ERR(fwrt, "0x%08X | board version\n", table.brd_ver);
+       IWL_ERR(fwrt, "0x%08X | hcmd\n", table.hcmd);
+       IWL_ERR(fwrt, "0x%08X | isr0\n", table.isr0);
+       IWL_ERR(fwrt, "0x%08X | isr1\n", table.isr1);
+       IWL_ERR(fwrt, "0x%08X | isr2\n", table.isr2);
+       IWL_ERR(fwrt, "0x%08X | isr3\n", table.isr3);
+       IWL_ERR(fwrt, "0x%08X | isr4\n", table.isr4);
+       IWL_ERR(fwrt, "0x%08X | last cmd Id\n", table.last_cmd_id);
+       IWL_ERR(fwrt, "0x%08X | wait_event\n", table.wait_event);
+       IWL_ERR(fwrt, "0x%08X | l2p_control\n", table.l2p_control);
+       IWL_ERR(fwrt, "0x%08X | l2p_duration\n", table.l2p_duration);
+       IWL_ERR(fwrt, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+       IWL_ERR(fwrt, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+       IWL_ERR(fwrt, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+       IWL_ERR(fwrt, "0x%08X | timestamp\n", table.u_timestamp);
+       IWL_ERR(fwrt, "0x%08X | flow_handler\n", table.flow_handler);
+}
+
+/*
+ * TCM error struct.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_tcm_error_event_table {
+       u32 valid;
+       u32 error_id;
+       u32 blink2;
+       u32 ilink1;
+       u32 ilink2;
+       u32 data1, data2, data3;
+       u32 logpc;
+       u32 frame_pointer;
+       u32 stack_pointer;
+       u32 msgid;
+       u32 isr;
+       u32 hw_status[5];
+       u32 sw_status[1];
+       u32 reserved[4];
+} __packed; /* TCM_LOG_ERROR_TABLE_API_S_VER_1 */
+
+static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt)
+{
+       struct iwl_trans *trans = fwrt->trans;
+       struct iwl_tcm_error_event_table table = {};
+       u32 base = fwrt->trans->dbg.tcm_error_event_table;
+       int i;
+
+       if (!base ||
+           !(fwrt->trans->dbg.error_event_table_tlv_status &
+             IWL_ERROR_EVENT_TABLE_TCM))
+               return;
+
+       iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+       IWL_ERR(fwrt, "TCM status:\n");
+       IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
+       IWL_ERR(fwrt, "0x%08X | tcm branchlink2\n", table.blink2);
+       IWL_ERR(fwrt, "0x%08X | tcm interruptlink1\n", table.ilink1);
+       IWL_ERR(fwrt, "0x%08X | tcm interruptlink2\n", table.ilink2);
+       IWL_ERR(fwrt, "0x%08X | tcm data1\n", table.data1);
+       IWL_ERR(fwrt, "0x%08X | tcm data2\n", table.data2);
+       IWL_ERR(fwrt, "0x%08X | tcm data3\n", table.data3);
+       IWL_ERR(fwrt, "0x%08X | tcm log PC\n", table.logpc);
+       IWL_ERR(fwrt, "0x%08X | tcm frame pointer\n", table.frame_pointer);
+       IWL_ERR(fwrt, "0x%08X | tcm stack pointer\n", table.stack_pointer);
+       IWL_ERR(fwrt, "0x%08X | tcm msg ID\n", table.msgid);
+       IWL_ERR(fwrt, "0x%08X | tcm ISR status\n", table.isr);
+       for (i = 0; i < ARRAY_SIZE(table.hw_status); i++)
+               IWL_ERR(fwrt, "0x%08X | tcm HW status[%d]\n",
+                       table.hw_status[i], i);
+       for (i = 0; i < ARRAY_SIZE(table.sw_status); i++)
+               IWL_ERR(fwrt, "0x%08X | tcm SW status[%d]\n",
+                       table.sw_status[i], i);
+}
+
+static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt)
+{
+       struct iwl_trans *trans = fwrt->trans;
+       u32 error, data1;
+
+       if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+               error = UMAG_SB_CPU_2_STATUS;
+               data1 = UMAG_SB_CPU_1_STATUS;
+       } else if (fwrt->trans->trans_cfg->device_family >=
+                  IWL_DEVICE_FAMILY_8000) {
+               error = SB_CPU_2_STATUS;
+               data1 = SB_CPU_1_STATUS;
+       } else {
+               return;
+       }
+
+       error = iwl_read_umac_prph(trans, error);
+
+       IWL_ERR(trans, "IML/ROM dump:\n");
+
+       if (error & 0xFFFF0000)
+               IWL_ERR(trans, "0x%04X | IML/ROM SYSASSERT\n", error >> 16);
+
+       IWL_ERR(fwrt, "0x%08X | IML/ROM error/state\n", error);
+       IWL_ERR(fwrt, "0x%08X | IML/ROM data1\n",
+               iwl_read_umac_prph(trans, data1));
+
+       if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
+               IWL_ERR(fwrt, "0x%08X | IML/ROM WFPM_AUTH_KEY_0\n",
+                       iwl_read_umac_prph(trans, SB_MODIFY_CFG_FLAG));
+}
+
+#define FSEQ_REG(x) { .addr = (x), .str = #x, }
+
+static void iwl_fwrt_dump_fseq_regs(struct iwl_fw_runtime *fwrt)
+{
+       struct iwl_trans *trans = fwrt->trans;
+       int i;
+       struct {
+               u32 addr;
+               const char *str;
+       } fseq_regs[] = {
+               FSEQ_REG(FSEQ_ERROR_CODE),
+               FSEQ_REG(FSEQ_TOP_INIT_VERSION),
+               FSEQ_REG(FSEQ_CNVIO_INIT_VERSION),
+               FSEQ_REG(FSEQ_OTP_VERSION),
+               FSEQ_REG(FSEQ_TOP_CONTENT_VERSION),
+               FSEQ_REG(FSEQ_ALIVE_TOKEN),
+               FSEQ_REG(FSEQ_CNVI_ID),
+               FSEQ_REG(FSEQ_CNVR_ID),
+               FSEQ_REG(CNVI_AUX_MISC_CHIP),
+               FSEQ_REG(CNVR_AUX_MISC_CHIP),
+               FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM),
+               FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR),
+       };
+
+       if (!iwl_trans_grab_nic_access(trans))
+               return;
+
+       IWL_ERR(fwrt, "Fseq Registers:\n");
+
+       for (i = 0; i < ARRAY_SIZE(fseq_regs); i++)
+               IWL_ERR(fwrt, "0x%08X | %s\n",
+                       iwl_read_prph_no_grab(trans, fseq_regs[i].addr),
+                       fseq_regs[i].str);
+
+       iwl_trans_release_nic_access(trans);
+}
+
+void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt)
+{
+       if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) {
+               IWL_ERR(fwrt,
+                       "DEVICE_ENABLED bit is not set. Aborting dump.\n");
+               return;
+       }
+
+       iwl_fwrt_dump_lmac_error_log(fwrt, 0);
+       if (fwrt->trans->dbg.lmac_error_event_table[1])
+               iwl_fwrt_dump_lmac_error_log(fwrt, 1);
+       iwl_fwrt_dump_umac_error_log(fwrt);
+       iwl_fwrt_dump_tcm_error_log(fwrt);
+       iwl_fwrt_dump_iml_error_log(fwrt);
+       iwl_fwrt_dump_fseq_regs(fwrt);
+}
+IWL_EXPORT_SYMBOL(iwl_fwrt_dump_error_logs);
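
A hypothetical call site, showing the intended shape after this
consolidation — the individual dumpers are now static to dump.c and
callers need only the one exported entry point:

    static void on_fw_error(struct iwl_fw_runtime *fwrt)
    {
            iwl_fwrt_dump_error_logs(fwrt);
            /* ...then collect the devcoredump as before... */
    }
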
index f9c5cf5..9a8c7b7 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2008-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2008-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -52,7 +52,8 @@ enum iwl_ucode_tlv_type {
        IWL_UCODE_TLV_INIT_DATA         = 4,
        IWL_UCODE_TLV_BOOT              = 5,
        IWL_UCODE_TLV_PROBE_MAX_LEN     = 6, /* a u32 value */
-       IWL_UCODE_TLV_PAN               = 7,
+       IWL_UCODE_TLV_PAN               = 7, /* deprecated -- only used in DVM */
+       IWL_UCODE_TLV_MEM_DESC          = 7, /* replaces PAN in non-DVM */
        IWL_UCODE_TLV_RUNT_EVTLOG_PTR   = 8,
        IWL_UCODE_TLV_RUNT_EVTLOG_SIZE  = 9,
        IWL_UCODE_TLV_RUNT_ERRLOG_PTR   = 10,
@@ -97,6 +98,7 @@ enum iwl_ucode_tlv_type {
 
        IWL_UCODE_TLV_PNVM_VERSION              = 62,
        IWL_UCODE_TLV_PNVM_SKU                  = 64,
+       IWL_UCODE_TLV_TCM_DEBUG_ADDRS           = 65,
 
        IWL_UCODE_TLV_FW_NUM_STATIONS           = IWL_UCODE_TLV_CONST_BASE + 0,
 
@@ -277,10 +279,11 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_BAND_IN_RX_DATA       = (__force iwl_ucode_tlv_api_t)59,
 
 
-       NUM_IWL_UCODE_TLV_API
 #ifdef __CHECKER__
-               /* sparse says it cannot increment the previous enum member */
-               = 128
+       /* sparse says it cannot increment the previous enum member */
+#define NUM_IWL_UCODE_TLV_API 128
+#else
+       NUM_IWL_UCODE_TLV_API
 #endif
 };
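
Background on this #ifdef dance (it repeats for the capa enum below):
sparse cannot evaluate "previous member + 1" when that member was cast
with (__force ...) to a restricted __bitwise type, so under __CHECKER__
the count becomes a macro with a safe upper bound. Reduced,
self-contained illustration:

    #ifdef __CHECKER__
    #define __demo_force    __attribute__((force))
    #define __demo_bitwise  __attribute__((bitwise))
    #else
    #define __demo_force
    #define __demo_bitwise
    #endif

    typedef unsigned int __demo_bitwise demo_flag_t;

    enum demo_flags {
            DEMO_FLAG_A = (__demo_force demo_flag_t)0,
            DEMO_FLAG_B = (__demo_force demo_flag_t)1,
    #ifdef __CHECKER__
    #define NUM_DEMO_FLAGS 128      /* any value >= the real count works */
    #else
            NUM_DEMO_FLAGS
    #endif
    };
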
 
@@ -411,6 +414,7 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_PROTECTED_TWT                = (__force iwl_ucode_tlv_capa_t)56,
        IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE           = (__force iwl_ucode_tlv_capa_t)57,
        IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN            = (__force iwl_ucode_tlv_capa_t)58,
+       IWL_UCODE_TLV_CAPA_BROADCAST_TWT                = (__force iwl_ucode_tlv_capa_t)60,
 
        /* set 2 */
        IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE         = (__force iwl_ucode_tlv_capa_t)64,
@@ -446,10 +450,11 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT                = (__force iwl_ucode_tlv_capa_t)100,
        IWL_UCODE_TLV_CAPA_RFIM_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)102,
 
-       NUM_IWL_UCODE_TLV_CAPA
 #ifdef __CHECKER__
-               /* sparse says it cannot increment the previous enum member */
-               = 128
+       /* sparse says it cannot increment the previous enum member */
+#define NUM_IWL_UCODE_TLV_CAPA 128
+#else
+       NUM_IWL_UCODE_TLV_CAPA
 #endif
 };
 
@@ -946,6 +951,10 @@ struct iwl_fw_cmd_version {
        u8 notif_ver;
 } __packed;
 
+struct iwl_fw_tcm_error_addr {
+       __le32 addr;
+}; /* FW_TLV_TCM_ERROR_INFO_ADDRS_S */
+
 static inline size_t _iwl_tlv_array_len(const struct iwl_ucode_tlv *tlv,
                                        size_t fixed_size, size_t var_size)
 {
index 40f2109..2403490 100644 (file)
@@ -10,7 +10,7 @@
 #include "fw/api/commands.h"
 #include "fw/api/nvm-reg.h"
 #include "fw/api/alive.h"
-#include <linux/efi.h>
+#include "fw/uefi.h"
 
 struct iwl_pnvm_section {
        __le32 offset;
@@ -220,83 +220,6 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
        return -ENOENT;
 }
 
-#if defined(CONFIG_EFI)
-
-#define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b,  \
-                                 0xb2, 0xec, 0xf5, 0xa3,       \
-                                 0x59, 0x4f, 0x4a, 0xea)
-
-#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"
-
-#define IWL_HARDCODED_PNVM_SIZE 4096
-
-struct pnvm_sku_package {
-       u8 rev;
-       u8 reserved1[3];
-       u32 total_size;
-       u8 n_skus;
-       u8 reserved2[11];
-       u8 data[];
-};
-
-static int iwl_pnvm_get_from_efi(struct iwl_trans *trans,
-                                u8 **data, size_t *len)
-{
-       struct efivar_entry *pnvm_efivar;
-       struct pnvm_sku_package *package;
-       unsigned long package_size;
-       int err;
-
-       pnvm_efivar = kzalloc(sizeof(*pnvm_efivar), GFP_KERNEL);
-       if (!pnvm_efivar)
-               return -ENOMEM;
-
-       memcpy(&pnvm_efivar->var.VariableName, IWL_UEFI_OEM_PNVM_NAME,
-              sizeof(IWL_UEFI_OEM_PNVM_NAME));
-       pnvm_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
-
-       /*
-        * TODO: we hardcode a maximum length here, because reading
-        * from the UEFI is not working.  To implement this properly,
-        * we have to call efivar_entry_size().
-        */
-       package_size = IWL_HARDCODED_PNVM_SIZE;
-
-       package = kmalloc(package_size, GFP_KERNEL);
-       if (!package) {
-               err = -ENOMEM;
-               goto out;
-       }
-
-       err = efivar_entry_get(pnvm_efivar, NULL, &package_size, package);
-       if (err) {
-               IWL_DEBUG_FW(trans,
-                            "PNVM UEFI variable not found %d (len %lu)\n",
-                            err, package_size);
-               goto out;
-       }
-
-       IWL_DEBUG_FW(trans, "Read PNVM fro UEFI with size %lu\n", package_size);
-
-       *data = kmemdup(package->data, *len, GFP_KERNEL);
-       if (!*data)
-               err = -ENOMEM;
-       *len = package_size - sizeof(*package);
-
-out:
-       kfree(package);
-       kfree(pnvm_efivar);
-
-       return err;
-}
-#else /* CONFIG_EFI */
-static inline int iwl_pnvm_get_from_efi(struct iwl_trans *trans,
-                                       u8 **data, size_t *len)
-{
-       return -EOPNOTSUPP;
-}
-#endif /* CONFIG_EFI */
-
 static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
 {
        const struct firmware *pnvm;
@@ -335,6 +258,7 @@ int iwl_pnvm_load(struct iwl_trans *trans,
 {
        u8 *data;
        size_t len;
+       struct pnvm_sku_package *package;
        struct iwl_notification_wait pnvm_wait;
        static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
                                                PNVM_INIT_COMPLETE_NTFY) };
@@ -356,9 +280,19 @@ int iwl_pnvm_load(struct iwl_trans *trans,
        }
 
        /* First attempt to get the PNVM from BIOS */
-       ret = iwl_pnvm_get_from_efi(trans, &data, &len);
-       if (!ret)
-               goto parse;
+       package = iwl_uefi_get_pnvm(trans, &len);
+       if (!IS_ERR_OR_NULL(package)) {
+               data = kmemdup(package->data, len, GFP_KERNEL);
+
+               /* free package regardless of whether kmemdup succeeded */
+               kfree(package);
+
+               if (data) {
+                       /* we need only the data size */
+                       len -= sizeof(*package);
+                       goto parse;
+               }
+       }
 
        /* If it's not available, try from the filesystem */
        ret = iwl_pnvm_get_from_fs(trans, &data, &len);
@@ -379,6 +313,30 @@ parse:
        kfree(data);
 
 skip_parse:
+       data = NULL;
+       /* now try to get the reduce power table, if not loaded yet */
+       if (!trans->reduce_power_loaded) {
+               data = iwl_uefi_get_reduced_power(trans, &len);
+               if (IS_ERR_OR_NULL(data)) {
+                       /*
+                        * Pretend we've loaded it - at least we've tried and
+                        * couldn't load it at all, so there's no point in
+                        * trying again over and over.
+                        */
+                       trans->reduce_power_loaded = true;
+
+                       goto skip_reduce_power;
+               }
+       }
+
+       ret = iwl_trans_set_reduce_power(trans, data, len);
+       if (ret)
+               IWL_DEBUG_FW(trans,
+                            "Failed to set reduce power table %d\n",
+                            ret);
+       kfree(data);
+
+skip_reduce_power:
        iwl_init_notification_wait(notif_wait, &pnvm_wait,
                                   ntf_cmds, ARRAY_SIZE(ntf_cmds),
                                   iwl_pnvm_complete_fn, trans);
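To summarize the reworked control flow in iwl_pnvm_load() above: the PNVM
image is now requested from the UEFI variable first, with the filesystem as
fallback, and the reduce-power table is attempted once and marked loaded
either way so a missing table is not retried on every restart. A
self-contained sketch of that fallback shape, using stand-in functions rather
than the driver's:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *get_from_uefi(size_t *len)
{
        return NULL;                    /* pretend no UEFI variable exists */
}

static void *get_from_fs(size_t *len)
{
        static const char blob[] = "pnvm-payload";
        char *p = malloc(sizeof(blob));

        if (!p)
                return NULL;
        memcpy(p, blob, sizeof(blob));
        *len = sizeof(blob);
        return p;
}

int main(void)
{
        size_t len = 0;
        void *data = get_from_uefi(&len);

        if (!data)                      /* first source failed: fall back */
                data = get_from_fs(&len);
        if (!data)
                return ENOENT;          /* both failed; the blob is optional */

        printf("loaded %zu bytes\n", len);
        free(data);
        return 0;
}
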
index e4f91bc..61d3d4e 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /******************************************************************************
  *
- * Copyright(c) 2020 Intel Corporation
+ * Copyright(c) 2020-2021 Intel Corporation
  *
  *****************************************************************************/
 
@@ -10,7 +10,7 @@
 
 #include "fw/notif-wait.h"
 
-#define MVM_UCODE_PNVM_TIMEOUT (HZ / 10)
+#define MVM_UCODE_PNVM_TIMEOUT (HZ / 4)
 
 int iwl_pnvm_load(struct iwl_trans *trans,
                  struct iwl_notif_wait_data *notif_wait);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
new file mode 100644 (file)
index 0000000..a7c79d8
--- /dev/null
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include "iwl-drv.h"
+#include "pnvm.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
+
+#include "fw/uefi.h"
+#include "fw/api/alive.h"
+#include <linux/efi.h>
+
+#define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b,  \
+                                 0xb2, 0xec, 0xf5, 0xa3,       \
+                                 0x59, 0x4f, 0x4a, 0xea)
+
+void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
+{
+       struct efivar_entry *pnvm_efivar;
+       void *data;
+       unsigned long package_size;
+       int err;
+
+       *len = 0;
+
+       pnvm_efivar = kzalloc(sizeof(*pnvm_efivar), GFP_KERNEL);
+       if (!pnvm_efivar)
+               return ERR_PTR(-ENOMEM);
+
+       memcpy(&pnvm_efivar->var.VariableName, IWL_UEFI_OEM_PNVM_NAME,
+              sizeof(IWL_UEFI_OEM_PNVM_NAME));
+       pnvm_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
+
+       /*
+        * TODO: we hardcode a maximum length here, because reading
+        * from the UEFI is not working.  To implement this properly,
+        * we have to call efivar_entry_size().
+        */
+       package_size = IWL_HARDCODED_PNVM_SIZE;
+
+       data = kmalloc(package_size, GFP_KERNEL);
+       if (!data) {
+               data = ERR_PTR(-ENOMEM);
+               goto out;
+       }
+
+       err = efivar_entry_get(pnvm_efivar, NULL, &package_size, data);
+       if (err) {
+               IWL_DEBUG_FW(trans,
+                            "PNVM UEFI variable not found %d (len %zd)\n",
+                            err, package_size);
+               kfree(data);
+               data = ERR_PTR(err);
+               goto out;
+       }
+
+       IWL_DEBUG_FW(trans, "Read PNVM from UEFI with size %zd\n", package_size);
+       *len = package_size;
+
+out:
+       kfree(pnvm_efivar);
+
+       return data;
+}
+
+static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
+                                          const u8 *data, size_t len)
+{
+       struct iwl_ucode_tlv *tlv;
+       u8 *reduce_power_data = NULL, *tmp;
+       u32 size = 0;
+
+       IWL_DEBUG_FW(trans, "Handling REDUCE_POWER section\n");
+
+       while (len >= sizeof(*tlv)) {
+               u32 tlv_len, tlv_type;
+
+               len -= sizeof(*tlv);
+               tlv = (void *)data;
+
+               tlv_len = le32_to_cpu(tlv->length);
+               tlv_type = le32_to_cpu(tlv->type);
+
+               if (len < tlv_len) {
+                       IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
+                               len, tlv_len);
+                       reduce_power_data = ERR_PTR(-EINVAL);
+                       goto out;
+               }
+
+               data += sizeof(*tlv);
+
+               switch (tlv_type) {
+               case IWL_UCODE_TLV_MEM_DESC: {
+                       IWL_DEBUG_FW(trans,
+                                    "Got IWL_UCODE_TLV_MEM_DESC len %d\n",
+                                    tlv_len);
+
+                       IWL_DEBUG_FW(trans, "Adding data (size %d)\n", tlv_len);
+
+                       tmp = krealloc(reduce_power_data, size + tlv_len, GFP_KERNEL);
+                       if (!tmp) {
+                               IWL_DEBUG_FW(trans,
+                                            "Couldn't allocate (more) reduce_power_data\n");
+
+                               reduce_power_data = ERR_PTR(-ENOMEM);
+                               goto out;
+                       }
+
+                       reduce_power_data = tmp;
+
+                       memcpy(reduce_power_data + size, data, tlv_len);
+
+                       size += tlv_len;
+
+                       break;
+               }
+               case IWL_UCODE_TLV_PNVM_SKU:
+                       IWL_DEBUG_FW(trans,
+                                    "New REDUCE_POWER section started, stop parsing.\n");
+                       goto done;
+               default:
+                       IWL_DEBUG_FW(trans, "Found TLV 0x%0x, len %d\n",
+                                    tlv_type, tlv_len);
+                       break;
+               }
+
+               len -= ALIGN(tlv_len, 4);
+               data += ALIGN(tlv_len, 4);
+       }
+
+done:
+       if (!size) {
+               IWL_DEBUG_FW(trans, "Empty REDUCE_POWER, skipping.\n");
+               reduce_power_data = ERR_PTR(-ENOENT);
+               goto out;
+       }
+
+       IWL_INFO(trans, "loaded REDUCE_POWER\n");
+
+out:
+       return reduce_power_data;
+}
+
+static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
+                                        const u8 *data, size_t len)
+{
+       struct iwl_ucode_tlv *tlv;
+       void *sec_data;
+
+       IWL_DEBUG_FW(trans, "Parsing REDUCE_POWER data\n");
+
+       while (len >= sizeof(*tlv)) {
+               u32 tlv_len, tlv_type;
+
+               len -= sizeof(*tlv);
+               tlv = (void *)data;
+
+               tlv_len = le32_to_cpu(tlv->length);
+               tlv_type = le32_to_cpu(tlv->type);
+
+               if (len < tlv_len) {
+                       IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
+                               len, tlv_len);
+                       return ERR_PTR(-EINVAL);
+               }
+
+               if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
+                       struct iwl_sku_id *sku_id =
+                               (void *)(data + sizeof(*tlv));
+
+                       IWL_DEBUG_FW(trans,
+                                    "Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
+                                    tlv_len);
+                       IWL_DEBUG_FW(trans, "sku_id 0x%0x 0x%0x 0x%0x\n",
+                                    le32_to_cpu(sku_id->data[0]),
+                                    le32_to_cpu(sku_id->data[1]),
+                                    le32_to_cpu(sku_id->data[2]));
+
+                       data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+                       len -= ALIGN(tlv_len, 4);
+
+                       if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
+                           trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
+                           trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
+                               sec_data = iwl_uefi_reduce_power_section(trans,
+                                                                        data,
+                                                                        len);
+                               if (!IS_ERR(sec_data))
+                                       return sec_data;
+                       } else {
+                               IWL_DEBUG_FW(trans, "SKU ID didn't match!\n");
+                       }
+               } else {
+                       data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+                       len -= ALIGN(tlv_len, 4);
+               }
+       }
+
+       return ERR_PTR(-ENOENT);
+}
+
+void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
+{
+       struct efivar_entry *reduce_power_efivar;
+       struct pnvm_sku_package *package;
+       void *data = NULL;
+       unsigned long package_size;
+       int err;
+
+       *len = 0;
+
+       reduce_power_efivar = kzalloc(sizeof(*reduce_power_efivar), GFP_KERNEL);
+       if (!reduce_power_efivar)
+               return ERR_PTR(-ENOMEM);
+
+       memcpy(&reduce_power_efivar->var.VariableName, IWL_UEFI_REDUCED_POWER_NAME,
+              sizeof(IWL_UEFI_REDUCED_POWER_NAME));
+       reduce_power_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
+
+       /*
+        * TODO: we hardcode a maximum length here, because reading
+        * from the UEFI is not working.  To implement this properly,
+        * we have to call efivar_entry_size().
+        */
+       package_size = IWL_HARDCODED_REDUCE_POWER_SIZE;
+
+       package = kmalloc(package_size, GFP_KERNEL);
+       if (!package) {
+               package = ERR_PTR(-ENOMEM);
+               goto out;
+       }
+
+       err = efivar_entry_get(reduce_power_efivar, NULL, &package_size, package);
+       if (err) {
+               IWL_DEBUG_FW(trans,
+                            "Reduced Power UEFI variable not found %d (len %lu)\n",
+                            err, package_size);
+               kfree(package);
+               data = ERR_PTR(err);
+               goto out;
+       }
+
+       IWL_DEBUG_FW(trans, "Read reduced power from UEFI with size %lu\n",
+                    package_size);
+       *len = package_size;
+
+       IWL_DEBUG_FW(trans, "rev %d, total_size %d, n_skus %d\n",
+                    package->rev, package->total_size, package->n_skus);
+
+       data = iwl_uefi_reduce_power_parse(trans, package->data,
+                                          *len - sizeof(*package));
+
+       kfree(package);
+
+out:
+       kfree(reduce_power_efivar);
+
+       return data;
+}
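The two parsers in the new uefi.c share one shape: walk a buffer of
(type, length) records whose payloads are padded to 4 bytes, and bail out if
a record claims more bytes than remain. A compact standalone model of that
walk; the record layout mirrors struct iwl_ucode_tlv, the buffer bytes are
invented, and a little-endian host is assumed:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {
        uint32_t type;
        uint32_t length;
};

#define ALIGN4(x) (((x) + 3u) & ~3u)

static int walk_tlvs(const uint8_t *data, size_t len)
{
        while (len >= sizeof(struct tlv_hdr)) {
                struct tlv_hdr hdr;
                size_t step;

                memcpy(&hdr, data, sizeof(hdr));
                data += sizeof(hdr);
                len -= sizeof(hdr);

                if (len < hdr.length)   /* truncated record: bail out */
                        return -1;

                printf("TLV type %u, len %u\n", hdr.type, hdr.length);

                step = ALIGN4(hdr.length);
                if (step > len)         /* last record may lack padding */
                        step = len;
                data += step;
                len -= step;
        }
        return 0;
}

int main(void)
{
        /* type 7 ("MEM_DESC"-like) with 5 payload bytes padded to 8,
         * then type 64 ("PNVM_SKU"-like) with an empty payload */
        static const uint8_t buf[] = {
                7, 0, 0, 0,  5, 0, 0, 0,  'h', 'e', 'l', 'l', 'o', 0, 0, 0,
                64, 0, 0, 0,  0, 0, 0, 0,
        };
        return walk_tlvs(buf, sizeof(buf));
}
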
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
new file mode 100644 (file)
index 0000000..45d0b36
--- /dev/null
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+
+#define IWL_UEFI_OEM_PNVM_NAME         L"UefiCnvWlanOemSignedPnvm"
+#define IWL_UEFI_REDUCED_POWER_NAME    L"UefiCnvWlanReducedPower"
+
+/*
+ * TODO: we have these hardcoded values that the caller must pass,
+ * because reading from the UEFI is not working.  To implement this
+ * properly, we have to change iwl_pnvm_get_from_uefi() to call
+ * efivar_entry_size() and return the value to the caller instead.
+ */
+#define IWL_HARDCODED_PNVM_SIZE                4096
+#define IWL_HARDCODED_REDUCE_POWER_SIZE        32768
+
+struct pnvm_sku_package {
+       u8 rev;
+       u32 total_size;
+       u8 n_skus;
+       u32 reserved[2];
+       u8 data[];
+} __packed;
+
+#ifdef CONFIG_EFI
+void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len);
+void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len);
+#else /* CONFIG_EFI */
+static inline
+void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
+{
+       return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline
+void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
+{
+       return ERR_PTR(-EOPNOTSUPP);
+}
+#endif /* CONFIG_EFI */
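The !CONFIG_EFI stubs above follow the kernel's usual compile-out pattern: a
header-provided inline returns ERR_PTR(-EOPNOTSUPP), so callers such as
iwl_pnvm_load() need no #ifdefs and a single IS_ERR_OR_NULL() check covers
both builds. A userspace model of the idea, with ERR_PTR/PTR_ERR/IS_ERR
re-implemented here purely for illustration:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* stand-in for the !CONFIG_EFI stub in the header above */
static inline void *get_feature_blob(size_t *len)
{
        *len = 0;
        return ERR_PTR(-EOPNOTSUPP);
}

int main(void)
{
        size_t len;
        void *blob = get_feature_blob(&len);

        if (IS_ERR(blob))       /* one error path, EFI or not */
                printf("feature unavailable: %ld\n", PTR_ERR(blob));
        return 0;
}
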
index b35ffdf..bf6ee56 100644 (file)
@@ -426,6 +426,7 @@ struct iwl_cfg {
 #define IWL_CFG_RF_TYPE_HR1            0x10C
 #define IWL_CFG_RF_TYPE_GF             0x10D
 #define IWL_CFG_RF_TYPE_MR             0x110
+#define IWL_CFG_RF_TYPE_FM             0x112
 
 #define IWL_CFG_RF_ID_TH               0x1
 #define IWL_CFG_RF_ID_TH1              0x1
@@ -505,8 +506,11 @@ extern const char iwl_ax201_killer_1650s_name[];
 extern const char iwl_ax201_killer_1650i_name[];
 extern const char iwl_ax210_killer_1675w_name[];
 extern const char iwl_ax210_killer_1675x_name[];
+extern const char iwl9560_killer_1550i_160_name[];
+extern const char iwl9560_killer_1550s_160_name[];
 extern const char iwl_ax211_name[];
 extern const char iwl_ax221_name[];
+extern const char iwl_ax231_name[];
 extern const char iwl_ax411_name[];
 #if IS_ENABLED(CONFIG_IWLDVM)
 extern const struct iwl_cfg iwl5300_agn_cfg;
@@ -586,7 +590,6 @@ extern const struct iwl_cfg iwl_qu_b0_hr_b0;
 extern const struct iwl_cfg iwl_qu_c0_hr_b0;
 extern const struct iwl_cfg iwl_ax200_cfg_cc;
 extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
-extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
 extern const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0;
 extern const struct iwl_cfg iwl_ax201_cfg_quz_hr;
 extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr;
@@ -613,6 +616,7 @@ extern const struct iwl_cfg iwl_cfg_ma_a0_hr_b0;
 extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0;
 extern const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0;
 extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0;
+extern const struct iwl_cfg iwl_cfg_ma_a0_fm_a0;
 extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0;
 extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
 extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
index 2be605c..e1fec23 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2018, 2020 Intel Corporation
+ * Copyright (C) 2018, 2020-2021 Intel Corporation
  */
 #ifndef __iwl_context_info_file_gen3_h__
 #define __iwl_context_info_file_gen3_h__
@@ -128,6 +128,17 @@ struct iwl_prph_scratch_rbd_cfg {
 } __packed; /* PERIPH_SCRATCH_RBD_CFG_S */
 
 /*
+ * struct iwl_prph_scratch_uefi_cfg - prph scratch reduce power table
+ * @base_addr: reduce power table address
+ * @size: table size in dwords
+ */
+struct iwl_prph_scratch_uefi_cfg {
+       __le64 base_addr;
+       __le32 size;
+       __le32 reserved;
+} __packed; /* PERIPH_SCRATCH_UEFI_CFG_S */
+
+/*
  * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
  * @version: version information of context info and HW
  * @control: control flags of FH configurations
@@ -141,6 +152,7 @@ struct iwl_prph_scratch_ctrl_cfg {
        struct iwl_prph_scratch_pnvm_cfg pnvm_cfg;
        struct iwl_prph_scratch_hwm_cfg hwm_cfg;
        struct iwl_prph_scratch_rbd_cfg rbd_cfg;
+       struct iwl_prph_scratch_uefi_cfg reduce_power_cfg;
 } __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
 
 /*
@@ -151,7 +163,7 @@ struct iwl_prph_scratch_ctrl_cfg {
  */
 struct iwl_prph_scratch {
        struct iwl_prph_scratch_ctrl_cfg ctrl_cfg;
-       __le32 reserved[16];
+       __le32 reserved[12];
        struct iwl_context_info_dram dram;
 } __packed; /* PERIPH_SCRATCH_S */
 
@@ -245,9 +257,11 @@ struct iwl_context_info_gen3 {
 
 int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
                                 const struct fw_img *fw);
-void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans);
+void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive);
 
 int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
                                          const void *data, u32 len);
+int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
+                                                 const void *data, u32 len);
 
 #endif /* __iwl_context_info_file_gen3_h__ */
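Note how the two struct hunks above balance: iwl_prph_scratch_uefi_cfg is
16 bytes (one __le64 plus two __le32, i.e. four dwords) and reserved[]
shrinks from 16 to 12 dwords, so struct iwl_prph_scratch keeps its size and
the dram member stays at the same offset. A standalone check of that
arithmetic with stand-in types (compile with -std=c11 for static_assert):

#include <assert.h>
#include <stdint.h>

struct uefi_cfg {               /* mirrors iwl_prph_scratch_uefi_cfg */
        uint64_t base_addr;
        uint32_t size;
        uint32_t reserved;
} __attribute__((packed));

/* the four dwords dropped from reserved[16] -> reserved[12] */
static_assert(sizeof(struct uefi_cfg) == 4 * sizeof(uint32_t),
              "uefi_cfg must be exactly four dwords");

int main(void) { return 0; }
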
index db312ab..47e5a17 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
  * Copyright (C) 2016 Intel Deutschland GmbH
  */
@@ -325,9 +325,6 @@ enum {
 #define CSR_HW_RF_ID_TYPE_GF           (0x0010D000)
 #define CSR_HW_RF_ID_TYPE_GF4          (0x0010E000)
 
-/* HW_RF CHIP ID  */
-#define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF)
-
 /* HW_RF CHIP STEP  */
 #define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF)
 
index 4cd8c39..0ddd255 100644 (file)
@@ -57,7 +57,7 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
        [IWL_DBG_TLV_TYPE_DEBUG_INFO]   = {.min_ver = 1, .max_ver = 1,},
        [IWL_DBG_TLV_TYPE_BUF_ALLOC]    = {.min_ver = 1, .max_ver = 1,},
        [IWL_DBG_TLV_TYPE_HCMD]         = {.min_ver = 1, .max_ver = 1,},
-       [IWL_DBG_TLV_TYPE_REGION]       = {.min_ver = 1, .max_ver = 1,},
+       [IWL_DBG_TLV_TYPE_REGION]       = {.min_ver = 1, .max_ver = 2,},
        [IWL_DBG_TLV_TYPE_TRIGGER]      = {.min_ver = 1, .max_ver = 1,},
 };
 
@@ -178,9 +178,20 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
        u32 type = le32_to_cpu(reg->type);
        u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);
 
+       /*
+        * The higher part of the ID in version 2 is irrelevant for
+        * us, so mask it out.
+        */
+       if (le32_to_cpu(reg->hdr.version) == 2)
+               id &= IWL_FW_INI_REGION_V2_MASK;
+
        if (le32_to_cpu(tlv->length) < sizeof(*reg))
                return -EINVAL;
 
+       /* for safe use of a string from FW, limit it to IWL_FW_INI_MAX_NAME */
+       IWL_DEBUG_FW(trans, "WRT: parsing region: %.*s\n",
+                    IWL_FW_INI_MAX_NAME, reg->name);
+
        if (id >= IWL_FW_INI_MAX_REGION_ID) {
                IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
                return -EINVAL;
index 884750b..977dce6 100644 (file)
@@ -1117,6 +1117,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                                IWL_ERROR_EVENT_TABLE_LMAC1;
                        break;
                        }
+               case IWL_UCODE_TLV_TCM_DEBUG_ADDRS: {
+                       struct iwl_fw_tcm_error_addr *ptr = (void *)tlv_data;
+
+                       if (tlv_len != sizeof(*ptr))
+                               goto invalid_tlv_len;
+                       drv->trans->dbg.tcm_error_event_table =
+                               le32_to_cpu(ptr->addr) & ~FW_ADDR_CACHE_CONTROL;
+                       drv->trans->dbg.error_event_table_tlv_status |=
+                               IWL_ERROR_EVENT_TABLE_TCM;
+                       break;
+                       }
                case IWL_UCODE_TLV_TYPE_DEBUG_INFO:
                case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
                case IWL_UCODE_TLV_TYPE_HCMD:
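In the new TCM TLV case above, the firmware-supplied table pointer is stored
with its cache-control bits cleared via FW_ADDR_CACHE_CONTROL, which is
defined elsewhere in the driver. A minimal model of that masking; the
0xC0000000 value below is an illustrative assumption, not quoted from the
source:

#include <stdint.h>
#include <stdio.h>

/* assumed example mask; the driver defines its own value */
#define FW_ADDR_CACHE_CONTROL 0xC0000000u

int main(void)
{
        uint32_t fw_addr = 0xC0A34038u;         /* made-up TLV payload */

        printf("stored table addr: 0x%08x\n",
               (unsigned int)(fw_addr & ~FW_ADDR_CACHE_CONTROL));
        return 0;
}
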
index fc75d04..850648e 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -549,8 +549,7 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
                                .mac_cap_info[2] =
                                        IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP,
                                .mac_cap_info[3] =
-                                       IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
-                                       IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2,
+                                       IEEE80211_HE_MAC_CAP3_OMI_CONTROL,
                                .mac_cap_info[4] =
                                        IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU |
                                        IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39,
@@ -579,25 +578,20 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
                                        IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
                                        IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
                                        IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
-                               .phy_cap_info[5] =
-                                       IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
-                                       IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
                                .phy_cap_info[6] =
                                        IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
                                        IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB |
                                        IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
                                .phy_cap_info[7] =
                                        IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
-                                       IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
-                                       IEEE80211_HE_PHY_CAP7_MAX_NC_1,
+                                       IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI,
                                .phy_cap_info[8] =
                                        IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
                                        IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
                                        IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
                                        IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
-                                       IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996,
+                                       IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242,
                                .phy_cap_info[9] =
-                                       IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
                                        IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
                                        IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
                                        IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED,
@@ -632,19 +626,11 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
                                .mac_cap_info[1] =
                                        IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
                                        IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
-                               .mac_cap_info[2] =
-                                       IEEE80211_HE_MAC_CAP2_BSR,
                                .mac_cap_info[3] =
-                                       IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
-                                       IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2,
-                               .mac_cap_info[4] =
-                                       IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
-                               .mac_cap_info[5] =
-                                       IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU,
+                                       IEEE80211_HE_MAC_CAP3_OMI_CONTROL,
                                .phy_cap_info[0] =
                                        IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
-                                       IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
-                                       IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
+                                       IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G,
                                .phy_cap_info[1] =
                                        IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD,
                                .phy_cap_info[2] =
@@ -654,27 +640,14 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
                                        IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
                                        IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
                                        IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
-                               .phy_cap_info[4] =
-                                       IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
-                                       IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
-                                       IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
-                               .phy_cap_info[5] =
-                                       IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
-                                       IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
                                .phy_cap_info[6] =
                                        IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
                                .phy_cap_info[7] =
-                                       IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
-                                       IEEE80211_HE_PHY_CAP7_MAX_NC_1,
+                                       IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI,
                                .phy_cap_info[8] =
                                        IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
-                                       IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
-                                       IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
-                                       IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
-                                       IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996,
+                                       IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242,
                                .phy_cap_info[9] =
-                                       IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
-                                       IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
                                        IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED,
                        },
                        /*
@@ -745,12 +718,72 @@ static void iwl_init_he_6ghz_capa(struct iwl_trans *trans,
                iftype_data[i].he_6ghz_capa.capa = cpu_to_le16(he_6ghz_capa);
 }
 
+static void
+iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
+                        struct ieee80211_supported_band *sband,
+                        struct ieee80211_sband_iftype_data *iftype_data,
+                        u8 tx_chains, u8 rx_chains,
+                        const struct iwl_fw *fw)
+{
+       bool is_ap = iftype_data->types_mask & BIT(NL80211_IFTYPE_AP);
+
+       /* Advertise an A-MPDU exponent extension based on
+        * operating band
+        */
+       if (sband->band != NL80211_BAND_2GHZ)
+               iftype_data->he_cap.he_cap_elem.mac_cap_info[3] |=
+                       IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1;
+       else
+               iftype_data->he_cap.he_cap_elem.mac_cap_info[3] |=
+                       IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3;
+
+       if (is_ap && iwlwifi_mod_params.nvm_file)
+               iftype_data->he_cap.he_cap_elem.phy_cap_info[0] |=
+                       IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+
+       if ((tx_chains & rx_chains) == ANT_AB) {
+               iftype_data->he_cap.he_cap_elem.phy_cap_info[5] |=
+                       IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
+                       IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2;
+               if (!is_ap)
+                       iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |=
+                               IEEE80211_HE_PHY_CAP7_MAX_NC_2;
+       } else if (!is_ap) {
+               /* If not 2x2, we need to indicate 1x1 in the
+                * Midamble RX Max NSTS - but not for AP mode
+                */
+               iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &=
+                       ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
+               iftype_data->he_cap.he_cap_elem.phy_cap_info[2] &=
+                       ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS;
+               iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |=
+                       IEEE80211_HE_PHY_CAP7_MAX_NC_1;
+       }
+
+       switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+       case IWL_CFG_RF_TYPE_GF:
+       case IWL_CFG_RF_TYPE_MR:
+               iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |=
+                       IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
+               if (!is_ap)
+                       iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |=
+                               IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
+               break;
+       }
+
+       if (fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_BROADCAST_TWT))
+               iftype_data->he_cap.he_cap_elem.mac_cap_info[2] |=
+                       IEEE80211_HE_MAC_CAP2_BCAST_TWT;
+}
+
 static void iwl_init_he_hw_capab(struct iwl_trans *trans,
                                 struct iwl_nvm_data *data,
                                 struct ieee80211_supported_band *sband,
-                                u8 tx_chains, u8 rx_chains)
+                                u8 tx_chains, u8 rx_chains,
+                                const struct iwl_fw *fw)
 {
        struct ieee80211_sband_iftype_data *iftype_data;
+       int i;
 
        /* should only initialize once */
        if (WARN_ON(sband->iftype_data))
@@ -777,26 +810,18 @@ static void iwl_init_he_hw_capab(struct iwl_trans *trans,
        sband->iftype_data = iftype_data;
        sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa);
 
-       /* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
-       if ((tx_chains & rx_chains) != ANT_AB) {
-               int i;
-
-               for (i = 0; i < sband->n_iftype_data; i++) {
-                       iftype_data[i].he_cap.he_cap_elem.phy_cap_info[1] &=
-                               ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
-                       iftype_data[i].he_cap.he_cap_elem.phy_cap_info[2] &=
-                               ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS;
-                       iftype_data[i].he_cap.he_cap_elem.phy_cap_info[7] &=
-                               ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK;
-               }
-       }
+       for (i = 0; i < sband->n_iftype_data; i++)
+               iwl_nvm_fixup_sband_iftd(trans, sband, &iftype_data[i],
+                                        tx_chains, rx_chains, fw);
+
        iwl_init_he_6ghz_capa(trans, data, sband, tx_chains, rx_chains);
 }
 
 static void iwl_init_sbands(struct iwl_trans *trans,
                            struct iwl_nvm_data *data,
                            const void *nvm_ch_flags, u8 tx_chains,
-                           u8 rx_chains, u32 sbands_flags, bool v4)
+                           u8 rx_chains, u32 sbands_flags, bool v4,
+                           const struct iwl_fw *fw)
 {
        struct device *dev = trans->dev;
        const struct iwl_cfg *cfg = trans->cfg;
@@ -816,7 +841,8 @@ static void iwl_init_sbands(struct iwl_trans *trans,
                             tx_chains, rx_chains);
 
        if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
-               iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains);
+               iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains,
+                                    fw);
 
        sband = &data->bands[NL80211_BAND_5GHZ];
        sband->band = NL80211_BAND_5GHZ;
@@ -831,7 +857,8 @@ static void iwl_init_sbands(struct iwl_trans *trans,
                                      tx_chains, rx_chains);
 
        if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
-               iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains);
+               iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains,
+                                    fw);
 
        /* 6GHz band. */
        sband = &data->bands[NL80211_BAND_6GHZ];
@@ -843,7 +870,8 @@ static void iwl_init_sbands(struct iwl_trans *trans,
                                          NL80211_BAND_6GHZ);
 
        if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
-               iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains);
+               iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains,
+                                    fw);
        else
                sband->n_channels = 0;
        if (n_channels != n_used)
@@ -1154,7 +1182,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
                sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ;
 
        iwl_init_sbands(trans, data, ch_section, tx_chains, rx_chains,
-                       sbands_flags, false);
+                       sbands_flags, false, fw);
        data->calib_version = 255;
 
        return data;
@@ -1661,7 +1689,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
                        channel_profile,
                        nvm->valid_tx_ant & fw->valid_tx_ant,
                        nvm->valid_rx_ant & fw->valid_rx_ant,
-                       sbands_flags, v4);
+                       sbands_flags, v4, fw);
 
        iwl_free_resp(&hcmd);
        return nvm;
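One detail of the new iwl_nvm_fixup_sband_iftd() worth spelling out: ANT_A
and ANT_B are single-chain bits, so (tx_chains & rx_chains) == ANT_AB holds
only when both chains are valid in both directions, i.e. a genuine 2x2
device; anything less takes the 1x1 midamble/MAX_NC_1 path for non-AP
interfaces. A tiny standalone illustration (the mask values follow the usual
iwlwifi convention but stand alone here):

#include <stdio.h>

#define ANT_A  0x1
#define ANT_B  0x2
#define ANT_AB (ANT_A | ANT_B)

static int is_2x2(unsigned int tx_chains, unsigned int rx_chains)
{
        return (tx_chains & rx_chains) == ANT_AB;
}

int main(void)
{
        printf("2x2 device: %d\n", is_2x2(ANT_AB, ANT_AB));     /* 1 */
        printf("1x2 device: %d\n", is_2x2(ANT_A, ANT_AB));      /* 0 */
        return 0;
}
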
index 3ce77e4..9a9e714 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016 Intel Deutschland GmbH
  */
@@ -412,6 +412,8 @@ enum {
 #define UREG_DOORBELL_TO_ISR6_RESUME   BIT(19)
 #define UREG_DOORBELL_TO_ISR6_PNVM     BIT(20)
 
+#define CNVI_MBOX_C                    0xA3400C
+
 #define FSEQ_ERROR_CODE                        0xA340C8
 #define FSEQ_TOP_INIT_VERSION          0xA34038
 #define FSEQ_CNVIO_INIT_VERSION                0xA3403C
index bf569f8..0199d7a 100644 (file)
@@ -193,6 +193,7 @@ enum iwl_error_event_table_status {
        IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
        IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
        IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
+       IWL_ERROR_EVENT_TABLE_TCM = BIT(3),
 };
 
 /**
@@ -589,6 +590,8 @@ struct iwl_trans_ops {
        void (*debugfs_cleanup)(struct iwl_trans *trans);
        void (*sync_nmi)(struct iwl_trans *trans);
        int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
+       int (*set_reduce_power)(struct iwl_trans *trans,
+                               const void *data, u32 len);
        void (*interrupts)(struct iwl_trans *trans, bool enable);
 };
 
@@ -706,6 +709,7 @@ struct iwl_self_init_dram {
  * @trigger_tlv: array of pointers to triggers TLVs for debug
  * @lmac_error_event_table: addrs of lmacs error tables
  * @umac_error_event_table: addr of umac error table
+ * @tcm_error_event_table: address of TCM error table
  * @error_event_table_tlv_status: bitmap that indicates which error table
  *     pointers were received via TLV. Uses &enum iwl_error_event_table_status
  * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
@@ -732,6 +736,7 @@ struct iwl_trans_debug {
 
        u32 lmac_error_event_table[2];
        u32 umac_error_event_table;
+       u32 tcm_error_event_table;
        unsigned int error_event_table_tlv_status;
 
        enum iwl_ini_cfg_state internal_ini_cfg;
@@ -957,6 +962,7 @@ struct iwl_trans {
        bool pm_support;
        bool ltr_enabled;
        u8 pnvm_loaded:1;
+       u8 reduce_power_loaded:1;
 
        const struct iwl_hcmd_arr *command_groups;
        int command_groups_size;
@@ -1420,6 +1426,20 @@ static inline int iwl_trans_set_pnvm(struct iwl_trans *trans,
        return 0;
 }
 
+static inline int iwl_trans_set_reduce_power(struct iwl_trans *trans,
+                                            const void *data, u32 len)
+{
+       if (trans->ops->set_reduce_power) {
+               int ret = trans->ops->set_reduce_power(trans, data, len);
+
+               if (ret)
+                       return ret;
+       }
+
+       trans->reduce_power_loaded = true;
+       return 0;
+}
+
 static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
 {
        return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
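The iwl_trans_set_reduce_power() wrapper above encodes a small but deliberate
policy: the transport hook is optional, a present-but-failing hook propagates
its error without setting the flag, and an absent hook counts as success. A
standalone model of that optional-op pattern (names invented):

#include <stdio.h>

struct trans_ops {
        int (*set_reduce_power)(void *trans, const void *data, unsigned int len);
};

struct trans {
        const struct trans_ops *ops;
        unsigned int reduce_power_loaded : 1;
};

static int set_reduce_power(struct trans *t, const void *data, unsigned int len)
{
        if (t->ops->set_reduce_power) {
                int ret = t->ops->set_reduce_power(t, data, len);

                if (ret)
                        return ret;     /* hook failed: flag stays clear */
        }
        t->reduce_power_loaded = 1;     /* success, or successful no-op */
        return 0;
}

int main(void)
{
        static const struct trans_ops no_hook = { 0 };
        struct trans t = { .ops = &no_hook };

        printf("ret=%d loaded=%u\n", set_reduce_power(&t, "x", 1),
               (unsigned int)t.reduce_power_loaded);
        return 0;
}
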
index 2e28cf2..6a259d8 100644 (file)
@@ -104,7 +104,7 @@ static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key,
 struct wowlan_key_data {
        struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
        struct iwl_wowlan_tkip_params_cmd *tkip;
-       struct iwl_wowlan_kek_kck_material_cmd_v3 *kek_kck_cmd;
+       struct iwl_wowlan_kek_kck_material_cmd_v4 *kek_kck_cmd;
        bool error, use_rsc_tsc, use_tkip, configure_keys;
        int wep_key_idx;
 };
@@ -393,14 +393,19 @@ static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm,
 }
 
 static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
+                                struct ieee80211_vif *vif,
                                 struct cfg80211_wowlan *wowlan)
 {
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_wowlan_patterns_cmd *pattern_cmd;
        struct iwl_host_cmd cmd = {
                .id = WOWLAN_PATTERNS,
                .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
        };
        int i, err;
+       int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+                                       WOWLAN_PATTERNS,
+                                       IWL_FW_CMD_VER_UNKNOWN);
 
        if (!wowlan->n_patterns)
                return 0;
@@ -408,11 +413,13 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
        cmd.len[0] = sizeof(*pattern_cmd) +
                wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v2);
 
-       pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
+       pattern_cmd = kzalloc(cmd.len[0], GFP_KERNEL);
        if (!pattern_cmd)
                return -ENOMEM;
 
-       pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+       pattern_cmd->n_patterns = wowlan->n_patterns;
+       if (ver >= 3)
+               pattern_cmd->sta_id = mvmvif->ap_sta_id;
 
        for (i = 0; i < wowlan->n_patterns; i++) {
                int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
@@ -636,7 +643,6 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
                          struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
                          struct ieee80211_sta *ap_sta)
 {
-       int ret;
        struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
 
        /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
@@ -646,12 +652,16 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
        wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
                ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
 
-       /* Query the last used seqno and set it */
-       ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
-       if (ret < 0)
-               return ret;
+       if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+                                 WOWLAN_CONFIGURATION, 0) < 6) {
+               /* Query the last used seqno and set it */
+               int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
 
-       wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
+               if (ret < 0)
+                       return ret;
+
+               wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
+       }
 
        iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
 
@@ -706,7 +716,8 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
                                            struct ieee80211_vif *vif,
                                            u32 cmd_flags)
 {
-       struct iwl_wowlan_kek_kck_material_cmd_v3 kek_kck_cmd = {};
+       struct iwl_wowlan_kek_kck_material_cmd_v4 kek_kck_cmd = {};
+       struct iwl_wowlan_kek_kck_material_cmd_v4 *_kek_kck_cmd = &kek_kck_cmd;
        struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
        bool unified = fw_has_capa(&mvm->fw->ucode_capa,
                                   IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
@@ -715,7 +726,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
                .use_rsc_tsc = false,
                .tkip = &tkip_cmd,
                .use_tkip = false,
-               .kek_kck_cmd = &kek_kck_cmd,
+               .kek_kck_cmd = _kek_kck_cmd,
        };
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int ret;
@@ -809,13 +820,9 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
                                                IWL_ALWAYS_LONG_GROUP,
                                                WOWLAN_KEK_KCK_MATERIAL,
                                                IWL_FW_CMD_VER_UNKNOWN);
-               if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 &&
+               if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 && cmd_ver != 4 &&
                            cmd_ver != IWL_FW_CMD_VER_UNKNOWN))
                        return -EINVAL;
-               if (cmd_ver == 3)
-                       cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3);
-               else
-                       cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
 
                memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
                       mvmvif->rekey_data.kck_len);
@@ -825,6 +832,21 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
                kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len);
                kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
                kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm);
+               kek_kck_cmd.sta_id = cpu_to_le32(mvmvif->ap_sta_id);
+
+               if (cmd_ver == 4) {
+                       cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v4);
+               } else {
+                       if (cmd_ver == 3)
+                               cmd_size =
+                                       sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3);
+                       else
+                               cmd_size =
+                                       sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
+                       /* skip the sta_id at the beginning */
+                       _kek_kck_cmd = (void *)
+                               ((u8 *)_kek_kck_cmd) + sizeof(kek_kck_cmd.sta_id);
+               }
 
                IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n",
                                 mvmvif->rekey_data.akm);
@@ -832,7 +854,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
                ret = iwl_mvm_send_cmd_pdu(mvm,
                                           WOWLAN_KEK_KCK_MATERIAL, cmd_flags,
                                           cmd_size,
-                                          &kek_kck_cmd);
+                                          _kek_kck_cmd);
                if (ret)
                        goto out;
        }
@@ -884,7 +906,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
 
        if (fw_has_api(&mvm->fw->ucode_capa,
                       IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE))
-               ret = iwl_mvm_send_patterns(mvm, wowlan);
+               ret = iwl_mvm_send_patterns(mvm, vif, wowlan);
        else
                ret = iwl_mvm_send_patterns_v1(mvm, wowlan);
        if (ret)
@@ -1534,9 +1556,12 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
        }
 
 out:
-       mvmvif->seqno_valid = true;
-       /* +0x10 because the set API expects next-to-use, not last-used */
-       mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
+       if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
+                                   WOWLAN_GET_STATUSES, 0) < 10) {
+               mvmvif->seqno_valid = true;
+               /* +0x10 because the set API expects next-to-use, not last-used */
+               mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
+       }
 
        return true;
 }
@@ -1587,15 +1612,27 @@ iwl_mvm_parse_wowlan_status_common(v6)
 iwl_mvm_parse_wowlan_status_common(v7)
 iwl_mvm_parse_wowlan_status_common(v9)
 
-struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
+static struct iwl_wowlan_status *
+iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
 {
        struct iwl_wowlan_status *status;
+       struct iwl_wowlan_get_status_cmd get_status_cmd = {
+               .sta_id = cpu_to_le32(sta_id),
+       };
        struct iwl_host_cmd cmd = {
                .id = WOWLAN_GET_STATUSES,
                .flags = CMD_WANT_SKB,
+               .data = { &get_status_cmd, },
+               .len = { sizeof(get_status_cmd), },
        };
        int ret, len;
        u8 notif_ver;
+       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+                                          WOWLAN_GET_STATUSES,
+                                          IWL_FW_CMD_VER_UNKNOWN);
+
+       if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN)
+               cmd.len[0] = 0;
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -1608,8 +1645,11 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
        len = iwl_rx_packet_payload_len(cmd.resp_pkt);
 
        /* default to 7 (when we have IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL) */
-       notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
-                                           WOWLAN_GET_STATUSES, 7);
+       notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
+                                           WOWLAN_GET_STATUSES, 0);
+       if (!notif_ver)
+               notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+                                                   WOWLAN_GET_STATUSES, 7);
 
        if (!fw_has_api(&mvm->fw->ucode_capa,
                        IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) {
@@ -1654,7 +1694,7 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
 
                status->gtk[0] = v7->gtk[0];
                status->igtk[0] = v7->igtk[0];
-       } else if (notif_ver == 9) {
+       } else if (notif_ver == 9 || notif_ver == 10) {
                struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data;
 
                status = iwl_mvm_parse_wowlan_status_common_v9(mvm,
@@ -1680,29 +1720,37 @@ out_free_resp:
 }
 
 static struct iwl_wowlan_status *
-iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm)
+iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, u8 sta_id)
 {
-       int ret;
-
-       /* only for tracing for now */
-       ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
-       if (ret)
-               IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
+       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+                                          OFFLOADS_QUERY_CMD,
+                                          IWL_FW_CMD_VER_UNKNOWN);
+       __le32 station_id = cpu_to_le32(sta_id);
+       u32 cmd_size = cmd_ver != IWL_FW_CMD_VER_UNKNOWN ? sizeof(station_id) : 0;
+
+       if (!mvm->net_detect) {
+               /* only for tracing for now */
+               int ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0,
+                                              cmd_size, &station_id);
+               if (ret)
+                       IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
+       }
 
-       return iwl_mvm_send_wowlan_get_status(mvm);
+       return iwl_mvm_send_wowlan_get_status(mvm, sta_id);
 }
 
 /* releases the MVM mutex */
 static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
                                         struct ieee80211_vif *vif)
 {
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_wowlan_status_data status;
        struct iwl_wowlan_status *fw_status;
        int i;
        bool keep;
        struct iwl_mvm_sta *mvm_ap_sta;
 
-       fw_status = iwl_mvm_get_wakeup_status(mvm);
+       fw_status = iwl_mvm_get_wakeup_status(mvm, mvmvif->ap_sta_id);
        if (IS_ERR_OR_NULL(fw_status))
                goto out_unlock;
 
@@ -1880,7 +1928,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
        u32 reasons = 0;
        int i, n_matches, ret;
 
-       fw_status = iwl_mvm_get_wakeup_status(mvm);
+       fw_status = iwl_mvm_get_wakeup_status(mvm, IWL_MVM_INVALID_STA);
        if (!IS_ERR_OR_NULL(fw_status)) {
                reasons = le32_to_cpu(fw_status->wakeup_reasons);
                kfree(fw_status);
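The KEK/KCK change earlier in this file leans on the v4 command being the v3
layout with a sta_id field prepended, so for firmware that only speaks v2/v3
the driver sends the same buffer starting just past sta_id. A self-contained
model of that prefix-skip trick; the field layout below is invented for the
example:

#include <stdint.h>
#include <stdio.h>

struct cmd_v3 {
        uint32_t akm;
        uint8_t kck[16];
} __attribute__((packed));

struct cmd_v4 {
        uint32_t sta_id;        /* new in v4, first member */
        uint32_t akm;
        uint8_t kck[16];
} __attribute__((packed));

int main(void)
{
        struct cmd_v4 cmd = { .sta_id = 7, .akm = 2 };
        const void *payload;
        size_t size;
        int ver = 3;            /* pretend the firmware speaks v3 */

        if (ver == 4) {
                payload = &cmd;
                size = sizeof(cmd);
        } else {
                /* skip the sta_id at the beginning */
                payload = (const uint8_t *)&cmd + sizeof(cmd.sta_id);
                size = sizeof(struct cmd_v3);
        }
        printf("sending %zu bytes, akm=%u\n", size,
               ((const struct cmd_v3 *)payload)->akm);
        return 0;
}
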
index 38d0bfb..7d9faef 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -460,7 +460,7 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
        int pos = 0;
 
        mutex_lock(&mvm->mutex);
-       iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
+       iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2, &curr_os, NULL);
        mutex_unlock(&mvm->mutex);
 
        do_div(curr_os, NSEC_PER_USEC);
index 63d6501..95f883a 100644 (file)
@@ -1023,7 +1023,9 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
                mvm->fw_restart++;
 
        /* take the return value to make compiler happy - it will fail anyway */
-       ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, 0, 0, NULL);
+       ret = iwl_mvm_send_cmd_pdu(mvm,
+                                  WIDE_ID(LONG_GROUP, REPLY_ERROR),
+                                  0, 0, NULL);
 
        mutex_unlock(&mvm->mutex);
 
index a456b8a..59cef0d 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 #include <linux/etherdevice.h>
 #include <linux/math64.h>
@@ -430,6 +430,10 @@ iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
                FTM_PUT_FLAG(TB);
        else if (peer->ftm.non_trigger_based)
                FTM_PUT_FLAG(NON_TB);
+
+       if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
+           peer->ftm.lmr_feedback)
+               FTM_PUT_FLAG(LMR_FEEDBACK);
 }
 
 static int
@@ -879,7 +883,8 @@ static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts)
        u32 curr_gp2, diff;
        u64 now_from_boot_ns;
 
-       iwl_mvm_get_sync_time(mvm, &curr_gp2, &now_from_boot_ns);
+       iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2,
+                             &now_from_boot_ns, NULL);
 
        if (curr_gp2 >= gp2_ts)
                diff = curr_gp2 - gp2_ts;
index 8aa5f1a..38fd588 100644 (file)
@@ -1139,19 +1139,34 @@ static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
 
 static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
 {
-       int cmd_ret;
-       struct iwl_lari_config_change_cmd_v3 cmd = {};
+       int ret;
+       u32 value;
+       struct iwl_lari_config_change_cmd_v4 cmd = {};
 
        cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);
 
+       ret = iwl_acpi_get_dsm_u32((&mvm->fwrt)->dev, 0, DSM_FUNC_11AX_ENABLEMENT,
+                                  &iwl_guid, &value);
+       if (!ret)
+               cmd.oem_11ax_allow_bitmap = cpu_to_le32(value);
        /* apply more config masks here */
 
-       if (cmd.config_bitmap) {
+       ret = iwl_acpi_get_dsm_u32((&mvm->fwrt)->dev, 0,
+                                  DSM_FUNC_ENABLE_UNII4_CHAN,
+                                  &iwl_guid, &value);
+       if (!ret)
+               cmd.oem_unii4_allow_bitmap = cpu_to_le32(value);
+
+       if (cmd.config_bitmap ||
+           cmd.oem_11ax_allow_bitmap ||
+           cmd.oem_unii4_allow_bitmap) {
                size_t cmd_size;
                u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
                                                   REGULATORY_AND_NVM_GROUP,
                                                   LARI_CONFIG_CHANGE, 1);
-               if (cmd_ver == 3)
+               if (cmd_ver == 4)
+                       cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
+               else if (cmd_ver == 3)
                        cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
                else if (cmd_ver == 2)
                        cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
@@ -1159,16 +1174,21 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
                        cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
 
                IWL_DEBUG_RADIO(mvm,
-                               "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x\n",
-                               le32_to_cpu(cmd.config_bitmap));
-               cmd_ret = iwl_mvm_send_cmd_pdu(mvm,
-                                              WIDE_ID(REGULATORY_AND_NVM_GROUP,
-                                                      LARI_CONFIG_CHANGE),
-                                              0, cmd_size, &cmd);
-               if (cmd_ret < 0)
+                               "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
+                               le32_to_cpu(cmd.config_bitmap),
+                               le32_to_cpu(cmd.oem_11ax_allow_bitmap));
+               IWL_DEBUG_RADIO(mvm,
+                               "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, cmd_ver=%d\n",
+                               le32_to_cpu(cmd.oem_unii4_allow_bitmap),
+                               cmd_ver);
+               ret = iwl_mvm_send_cmd_pdu(mvm,
+                                          WIDE_ID(REGULATORY_AND_NVM_GROUP,
+                                                  LARI_CONFIG_CHANGE),
+                                          0, cmd_size, &cmd);
+               if (ret < 0)
                        IWL_DEBUG_RADIO(mvm,
                                        "Failed to send LARI_CONFIG_CHANGE (%d)\n",
-                                       cmd_ret);
+                                       ret);
        }
 }
 #else /* CONFIG_ACPI */
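
A minimal sketch of the cmd_ver dispatch above, pulled out as a helper for readability. iwl_lari_cmd_size is a hypothetical name, and this assumes each vN command struct is a strict prefix of vN+1, as the hunk implies:

static size_t iwl_lari_cmd_size(u8 cmd_ver)
{
        switch (cmd_ver) {
        case 4:
                return sizeof(struct iwl_lari_config_change_cmd_v4);
        case 3:
                return sizeof(struct iwl_lari_config_change_cmd_v3);
        case 2:
                return sizeof(struct iwl_lari_config_change_cmd_v2);
        default: /* v1 is the baseline every firmware understands */
                return sizeof(struct iwl_lari_config_change_cmd_v1);
        }
}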
index 607d5d5..70ebecb 100644 (file)
@@ -3306,14 +3306,14 @@ static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
 
 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
                                       struct ieee80211_vif *vif,
-                                      u16 req_duration)
+                                      struct ieee80211_prep_tx_info *info)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
        u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
 
-       if (req_duration > duration)
-               duration = req_duration;
+       if (info->duration > duration)
+               duration = info->duration;
 
        mutex_lock(&mvm->mutex);
        /* Try really hard to protect the session and hear a beacon
@@ -3800,6 +3800,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct cfg80211_chan_def chandef;
        struct iwl_mvm_phy_ctxt *phy_ctxt;
+       bool band_change_removal;
        int ret, i;
 
        IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
@@ -3880,19 +3881,30 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
        cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
 
        /*
-        * Change the PHY context configuration as it is currently referenced
-        * only by the P2P Device MAC
+        * Check if the remain-on-channel is on a different band and that
+        * requires context removal, see iwl_mvm_phy_ctxt_changed(). If
+        * so, we'll need to release and then re-configure here, since we
+        * must not remove a PHY context that's part of a binding.
         */
-       if (mvmvif->phy_ctxt->ref == 1) {
+       band_change_removal =
+               fw_has_capa(&mvm->fw->ucode_capa,
+                           IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
+               mvmvif->phy_ctxt->channel->band != chandef.chan->band;
+
+       if (mvmvif->phy_ctxt->ref == 1 && !band_change_removal) {
+               /*
+                * Change the PHY context configuration as it is currently
+                * referenced only by the P2P Device MAC (and we can modify it)
+                */
                ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
                                               &chandef, 1, 1);
                if (ret)
                        goto out_unlock;
        } else {
                /*
-                * The PHY context is shared with other MACs. Need to remove the
-                * P2P Device from the binding, allocate an new PHY context and
-                * create a new binding
+                * The PHY context is shared with other MACs (or we're trying to
+                * switch bands), so remove the P2P Device from the binding,
+                * allocate a new PHY context and create a new binding.
                 */
                phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
                if (!phy_ctxt) {
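
Condensed, the new guard in the hunk above reduces to a single predicate; a sketch using the names from the patch, assuming no other behaviour:

/* in-place reconfiguration is safe only if we hold the sole reference
 * and (on CDB firmware) the requested channel stays on the same band */
bool can_modify_in_place =
        mvmvif->phy_ctxt->ref == 1 &&
        !(fw_has_capa(&mvm->fw->ucode_capa,
                      IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
          mvmvif->phy_ctxt->channel->band != chandef.chan->band);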
@@ -4211,7 +4223,6 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
        struct ieee80211_vif *disabled_vif = NULL;
 
        lockdep_assert_held(&mvm->mutex);
-
        iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
 
        switch (vif->type) {
index 4d9d4d6..b50942f 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/thermal.h>
 #endif
 
+#include <linux/ktime.h>
+
 #include "iwl-op-mode.h"
 #include "iwl-trans.h"
 #include "fw/notif-wait.h"
@@ -195,6 +197,7 @@ enum iwl_mvm_smps_type_request {
        IWL_MVM_SMPS_REQ_BT_COEX,
        IWL_MVM_SMPS_REQ_TT,
        IWL_MVM_SMPS_REQ_PROT,
+       IWL_MVM_SMPS_REQ_FW,
        NUM_IWL_MVM_SMPS_REQ,
 };
 
@@ -991,6 +994,8 @@ struct iwl_mvm {
         */
        bool temperature_test;  /* Debug test temperature is enabled */
 
+       bool fw_static_smps_request;
+
        unsigned long bt_coex_last_tcm_ts;
        struct iwl_mvm_tcm tcm;
 
@@ -1447,10 +1452,16 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
                               struct ieee80211_tx_rate *r);
 u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
 u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac);
-void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
+
+static inline void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+{
+       iwl_fwrt_dump_error_logs(&mvm->fwrt);
+}
+
 u8 first_antenna(u8 mask);
 u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
-void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime);
+void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2,
+                          u64 *boottime, ktime_t *realtime);
 u32 iwl_mvm_get_systime(struct iwl_mvm *mvm);
 
 /* Tx / Host Commands */
@@ -1769,7 +1780,6 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
 void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
                                     struct ieee80211_vif *vif, int idx);
 extern const struct file_operations iwl_dbgfs_d3_test_ops;
-struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm);
 #ifdef CONFIG_PM
 void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
                                 struct ieee80211_vif *vif);
@@ -1827,7 +1837,9 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
 void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                enum iwl_mvm_smps_type_request req_type,
                                enum ieee80211_smps_mode smps_request);
-bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm);
+bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
+                                 struct iwl_mvm_phy_ctxt *ctxt);
+void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif);
 
 /* Low latency */
 int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
index 1cc90e6..4188051 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014 Intel Corporation
+ * Copyright (C) 2012-2014, 2021 Intel Corporation
  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
  * Copyright (C) 2015 Intel Deutschland GmbH
  */
@@ -36,7 +36,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
                struct iwl_proto_offload_cmd_v1 v1;
                struct iwl_proto_offload_cmd_v2 v2;
                struct iwl_proto_offload_cmd_v3_small v3s;
-               struct iwl_proto_offload_cmd_v3_large v3l;
+               struct iwl_proto_offload_cmd_v4 v4;
        } cmd = {};
        struct iwl_host_cmd hcmd = {
                .id = PROT_OFFLOAD_CONFIG_CMD,
@@ -47,6 +47,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
        struct iwl_proto_offload_cmd_common *common;
        u32 enabled = 0, size;
        u32 capa_flags = mvm->fw->ucode_capa.flags;
+       int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+                                       PROT_OFFLOAD_CONFIG_CMD, 0);
+
 #if IS_ENABLED(CONFIG_IPV6)
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int i;
@@ -72,9 +75,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
                        addrs = cmd.v3s.targ_addrs;
                        n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
                } else {
-                       nsc = cmd.v3l.ns_config;
+                       nsc = cmd.v4.ns_config;
                        n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
-                       addrs = cmd.v3l.targ_addrs;
+                       addrs = cmd.v4.targ_addrs;
                        n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
                }
 
@@ -116,7 +119,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
                        cmd.v3s.num_valid_ipv6_addrs =
                                cpu_to_le32(i - num_skipped);
                else
-                       cmd.v3l.num_valid_ipv6_addrs =
+                       cmd.v4.num_valid_ipv6_addrs =
                                cpu_to_le32(i - num_skipped);
        } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
                bool found = false;
@@ -171,8 +174,17 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
                common = &cmd.v3s.common;
                size = sizeof(cmd.v3s);
        } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
-               common = &cmd.v3l.common;
-               size = sizeof(cmd.v3l);
+               common = &cmd.v4.common;
+               size = sizeof(cmd.v4);
+               if (ver < 4) {
+                       /*
+                        * This basically uses iwl_proto_offload_cmd_v3_large
+                        * which doesn't have the sta_id parameter before the
+                        * common part.
+                        */
+                       size -= sizeof(cmd.v4.sta_id);
+                       hcmd.data[0] = common;
+               }
        } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
                common = &cmd.v2.common;
                size = sizeof(cmd.v2);
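
The size -= sizeof(cmd.v4.sta_id) trick above leans on a layout assumption worth making explicit: iwl_proto_offload_cmd_v4 is iwl_proto_offload_cmd_v3_large with one sta_id field prepended, so starting the payload at &cmd.v4.common and trimming that field reproduces the v3 wire format. Roughly (a sketch, not the real header):

struct iwl_proto_offload_cmd_v4 {
        __le32 sta_id;                  /* new in v4, sits before the rest */
        struct iwl_proto_offload_cmd_common common;
        /* ... ns_config[] and targ_addrs[] laid out as in v3_large ... */
} __packed;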
index ebed82c..20e8d34 100644 (file)
@@ -210,6 +210,39 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
        ieee80211_disconnect(vif, true);
 }
 
+void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+
+       iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW,
+                           mvm->fw_static_smps_request ?
+                               IEEE80211_SMPS_STATIC :
+                               IEEE80211_SMPS_AUTOMATIC);
+}
+
+static void iwl_mvm_intf_dual_chain_req(void *data, u8 *mac,
+                                       struct ieee80211_vif *vif)
+{
+       iwl_mvm_apply_fw_smps_request(vif);
+}
+
+static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_thermal_dual_chain_request *req = (void *)pkt->data;
+
+       /*
+        * We could pass it to the iterator data, but also need to remember
+        * it for new interfaces that are added while in this state.
+        */
+       mvm->fw_static_smps_request =
+               req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE);
+       ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                    iwl_mvm_intf_dual_chain_req, NULL);
+}
+
 /**
  * enum iwl_rx_handler_context context for Rx handler
  * @RX_HANDLER_SYNC : this means that it will be called in the Rx path
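
Because the request is latched in mvm->fw_static_smps_request rather than only iterated once, interfaces created after the notification can pick it up too. A hedged sketch of such a call site (the add-interface path itself is not part of this diff):

/* e.g. from the mac80211 add_interface handler, once the vif exists */
if (vif->type == NL80211_IFTYPE_STATION)
        iwl_mvm_apply_fw_smps_request(vif);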
@@ -358,6 +391,11 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
        RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
                       iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
                       struct iwl_datapath_monitor_notif),
+
+       RX_HANDLER_GRP(DATA_PATH_GROUP, THERMAL_DUAL_CHAIN_REQUEST,
+                      iwl_mvm_rx_thermal_dual_chain_req,
+                      RX_HANDLER_ASYNC_LOCKED,
+                      struct iwl_thermal_dual_chain_request),
 };
 #undef RX_HANDLER
 #undef RX_HANDLER_GRP
@@ -445,7 +483,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
        HCMD_NAME(D3_CONFIG_CMD),
        HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
        HCMD_NAME(OFFLOADS_QUERY_CMD),
-       HCMD_NAME(REMOTE_WAKE_CONFIG_CMD),
        HCMD_NAME(MATCH_FOUND_NOTIFICATION),
        HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
        HCMD_NAME(WOWLAN_PATTERNS),
@@ -503,6 +540,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
        HCMD_NAME(TLC_MNG_CONFIG_CMD),
        HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
        HCMD_NAME(MONITOR_NOTIF),
+       HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
        HCMD_NAME(STA_PM_NOTIF),
        HCMD_NAME(MU_GROUP_MGMT_NOTIF),
        HCMD_NAME(RX_QUEUES_NOTIFICATION),
index 0fd51f6..035336a 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
  * Copyright (C) 2017 Intel Deutschland GmbH
  */
@@ -76,6 +76,7 @@ static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,
 }
 
 static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
+                                        struct iwl_mvm_phy_ctxt *ctxt,
                                         __le32 *rxchain_info,
                                         u8 chains_static,
                                         u8 chains_dynamic)
@@ -93,11 +94,22 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
         * between the two antennas is sufficiently different to impact
         * performance.
         */
-       if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) {
+       if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm, ctxt)) {
                idle_cnt = 2;
                active_cnt = 2;
        }
 
+       /*
+        * If the firmware requested it, then we know that it supports
+        * getting zero for the values to indicate "use one, but pick
+        * which one yourself", which means it can dynamically pick one
+        * that e.g. has better RSSI.
+        */
+       if (mvm->fw_static_smps_request && active_cnt == 1 && idle_cnt == 1) {
+               idle_cnt = 0;
+               active_cnt = 0;
+       }
+
        *rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
                                        PHY_RX_CHAIN_VALID_POS);
        *rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
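
Summarizing the chain-count logic above for the common case where the caller passes in one idle and one active chain (a summary of this hunk, not new behaviour):

/*
 *  rx diversity allowed   fw_static_smps_request   idle/active written
 *  yes                    (don't care)             2 / 2  use both chains
 *  no                     no                       1 / 1  use a fixed chain
 *  no                     yes                      0 / 0  firmware picks one
 */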
@@ -113,6 +125,7 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
  * Add the phy configuration to the PHY context command
  */
 static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
+                                        struct iwl_mvm_phy_ctxt *ctxt,
                                         struct iwl_phy_context_cmd_v1 *cmd,
                                         struct cfg80211_chan_def *chandef,
                                         u8 chains_static, u8 chains_dynamic)
@@ -123,7 +136,7 @@ static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
        /* Set the channel info data */
        iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
 
-       iwl_mvm_phy_ctxt_set_rxchain(mvm, &tail->rxchain_info,
+       iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &tail->rxchain_info,
                                     chains_static, chains_dynamic);
 
        tail->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
@@ -133,6 +146,7 @@ static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
  * Add the phy configuration to the PHY context command
  */
 static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
+                                     struct iwl_mvm_phy_ctxt *ctxt,
                                      struct iwl_phy_context_cmd *cmd,
                                      struct cfg80211_chan_def *chandef,
                                      u8 chains_static, u8 chains_dynamic)
@@ -143,7 +157,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
        /* Set the channel info data */
        iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
 
-       iwl_mvm_phy_ctxt_set_rxchain(mvm, &cmd->rxchain_info,
+       iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info,
                                     chains_static, chains_dynamic);
 }
 
@@ -170,7 +184,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
                iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action);
 
                /* Set the command data */
-               iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
+               iwl_mvm_phy_ctxt_cmd_data(mvm, ctxt, &cmd, chandef,
                                          chains_static,
                                          chains_dynamic);
 
@@ -186,7 +200,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
                                         action);
 
                /* Set the command data */
-               iwl_mvm_phy_ctxt_cmd_data_v1(mvm, &cmd, chandef,
+               iwl_mvm_phy_ctxt_cmd_data_v1(mvm, ctxt, &cmd, chandef,
                                             chains_static,
                                             chains_dynamic);
                ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD,
index 8e26422..c0babb8 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
  */
@@ -2001,8 +2001,10 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
        struct sk_buff *skb;
        u8 channel, energy_a, energy_b;
        struct iwl_mvm_rx_phy_data phy_data = {
+               .info_type = le32_get_bits(desc->phy_info[1],
+                                          IWL_RX_PHY_DATA1_INFO_TYPE_MASK),
                .d0 = desc->phy_info[0],
-               .info_type = IWL_RX_PHY_INFO_TYPE_NONE,
+               .d1 = desc->phy_info[1],
        };
 
        if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*desc)))
@@ -2015,10 +2017,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
        energy_b = (rssi & RX_NO_DATA_CHAIN_B_MSK) >> RX_NO_DATA_CHAIN_B_POS;
        channel = (rssi & RX_NO_DATA_CHANNEL_MSK) >> RX_NO_DATA_CHANNEL_POS;
 
-       phy_data.info_type =
-               le32_get_bits(desc->phy_info[1],
-                             IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
-
        /* Dont use dev_alloc_skb(), we'll have enough headroom once
         * ieee80211_hdr pulled.
         */
index 5a0696c..0368b71 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -2327,9 +2327,9 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                             &scan_p->general_params,
                                             gen_flags);
 
-        ret = iwl_mvm_fill_scan_sched_params(params,
-                                             scan_p->periodic_params.schedule,
-                                             &scan_p->periodic_params.delay);
+       ret = iwl_mvm_fill_scan_sched_params(params,
+                                            scan_p->periodic_params.schedule,
+                                            &scan_p->periodic_params.delay);
        if (ret)
                return ret;
 
@@ -2362,9 +2362,9 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                             &scan_p->general_params,
                                             gen_flags);
 
-        ret = iwl_mvm_fill_scan_sched_params(params,
-                                             scan_p->periodic_params.schedule,
-                                             &scan_p->periodic_params.delay);
+       ret = iwl_mvm_fill_scan_sched_params(params,
+                                            scan_p->periodic_params.schedule,
+                                            &scan_p->periodic_params.delay);
        if (ret)
                return ret;
 
index f618368..9c45a64 100644 (file)
@@ -3794,8 +3794,12 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
 
        mvm_sta->disable_tx = disable;
 
-       /* Tell mac80211 to start/stop queuing tx for this station */
-       ieee80211_sta_block_awake(mvm->hw, sta, disable);
+       /*
+        * If sta PS state is handled by mac80211, tell it to start/stop
+        * queuing tx for this station.
+        */
+       if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
+               ieee80211_sta_block_awake(mvm->hw, sta, disable);
 
        iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
 
index 83342a6..d3307a1 100644 (file)
@@ -31,6 +31,13 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
                return;
 
        list_del(&te_data->list);
+
+       /*
+        * the list is only used for AUX ROC events so make sure it is always
+        * initialized
+        */
+       INIT_LIST_HEAD(&te_data->list);
+
        te_data->running = false;
        te_data->uid = 0;
        te_data->id = TE_MAX;
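
The explicit re-init above matters because list_del() poisons the node's pointers, and cleared time-event data may hit list_del() again on a later teardown. The kernel's list API expresses the same pattern in one call, which would be equivalent here:

list_del_init(&te_data->list);  /* delete and leave the node re-usable */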
@@ -310,6 +317,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
                         * and know the dtim period.
                         */
                        iwl_mvm_te_check_disconnect(mvm, te_data->vif,
+                               !te_data->vif->bss_conf.assoc ?
+                               "Not associated and the time event is over already..." :
                                "No beacon heard and the time event is over already...");
                        break;
                default:
@@ -607,14 +616,15 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
 }
 
 static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
-                                             struct iwl_mvm_vif *mvmvif)
+                                             struct iwl_mvm_vif *mvmvif,
+                                             u32 id)
 {
        struct iwl_mvm_session_prot_cmd cmd = {
                .id_and_color =
                        cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
                                                        mvmvif->color)),
                .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
-               .conf_id = cpu_to_le32(mvmvif->time_event_data.id),
+               .conf_id = cpu_to_le32(id),
        };
        int ret;
 
@@ -632,6 +642,12 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
 {
        u32 id;
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+       enum nl80211_iftype iftype;
+
+       if (!te_data->vif)
+               return false;
+
+       iftype = te_data->vif->type;
 
        /*
         * It is possible that by the time we got to this point the time
@@ -656,8 +672,8 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
                        IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
                if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
                        /* Session protection is still ongoing. Cancel it */
-                       iwl_mvm_cancel_session_protection(mvm, mvmvif);
-                       if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+                       iwl_mvm_cancel_session_protection(mvm, mvmvif, id);
+                       if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
                                set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
                                iwl_mvm_roc_finished(mvm);
                        }
@@ -738,11 +754,6 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
                IWL_ERR(mvm, "Couldn't remove the time event\n");
 }
 
-/*
- * When the firmware supports the session protection API,
- * this is not needed since it'll automatically remove the
- * session protection after association + beacon reception.
- */
 void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
                                     struct ieee80211_vif *vif)
 {
@@ -756,7 +767,15 @@ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
        id = te_data->id;
        spin_unlock_bh(&mvm->time_event_lock);
 
-       if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
+               if (id != SESSION_PROTECT_CONF_ASSOC) {
+                       IWL_DEBUG_TE(mvm,
+                                    "don't remove session protection id=%u\n",
+                                    id);
+                       return;
+               }
+       } else if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
                IWL_DEBUG_TE(mvm,
                             "don't remove TE with id=%u (not session protection)\n",
                             id);
@@ -808,6 +827,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
                         * and know the dtim period.
                         */
                        iwl_mvm_te_check_disconnect(mvm, vif,
+                                                   !vif->bss_conf.assoc ?
+                                                   "Not associated and the session protection is over already..." :
                                                    "No beacon heard and the session protection is over already...");
                        spin_lock_bh(&mvm->time_event_lock);
                        iwl_mvm_te_clear_data(mvm, te_data);
@@ -981,7 +1002,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
                if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-                       iwl_mvm_cancel_session_protection(mvm, mvmvif);
+                       iwl_mvm_cancel_session_protection(mvm, mvmvif,
+                                                         mvmvif->time_event_data.id);
                        set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
                } else {
                        iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
@@ -1141,6 +1163,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
 
        iwl_mvm_te_clear_data(mvm, te_data);
        te_data->duration = le32_to_cpu(cmd.duration_tu);
+       te_data->vif = vif;
        spin_unlock_bh(&mvm->time_event_lock);
 
        IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
index 1ad621d..0a13c2b 100644 (file)
@@ -1032,6 +1032,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
        if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
                return -1;
 
+       if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->he_cap.has_he)
+               return -1;
+
        if (unlikely(ieee80211_is_probe_resp(fc)))
                iwl_mvm_probe_resp_set_noa(mvm, skb);
 
index c566be9..4a3d297 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
  */
@@ -238,316 +238,6 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
        return last_idx;
 }
 
-/*
- * Note: This structure is read from the device with IO accesses,
- * and the reading already does the endian conversion. As it is
- * read with u32-sized accesses, any members with a different size
- * need to be ordered correctly though!
- */
-struct iwl_error_event_table_v1 {
-       u32 valid;              /* (nonzero) valid, (0) log is empty */
-       u32 error_id;           /* type of error */
-       u32 pc;                 /* program counter */
-       u32 blink1;             /* branch link */
-       u32 blink2;             /* branch link */
-       u32 ilink1;             /* interrupt link */
-       u32 ilink2;             /* interrupt link */
-       u32 data1;              /* error-specific data */
-       u32 data2;              /* error-specific data */
-       u32 data3;              /* error-specific data */
-       u32 bcon_time;          /* beacon timer */
-       u32 tsf_low;            /* network timestamp function timer */
-       u32 tsf_hi;             /* network timestamp function timer */
-       u32 gp1;                /* GP1 timer register */
-       u32 gp2;                /* GP2 timer register */
-       u32 gp3;                /* GP3 timer register */
-       u32 ucode_ver;          /* uCode version */
-       u32 hw_ver;             /* HW Silicon version */
-       u32 brd_ver;            /* HW board version */
-       u32 log_pc;             /* log program counter */
-       u32 frame_ptr;          /* frame pointer */
-       u32 stack_ptr;          /* stack pointer */
-       u32 hcmd;               /* last host command header */
-       u32 isr0;               /* isr status register LMPM_NIC_ISR0:
-                                * rxtx_flag */
-       u32 isr1;               /* isr status register LMPM_NIC_ISR1:
-                                * host_flag */
-       u32 isr2;               /* isr status register LMPM_NIC_ISR2:
-                                * enc_flag */
-       u32 isr3;               /* isr status register LMPM_NIC_ISR3:
-                                * time_flag */
-       u32 isr4;               /* isr status register LMPM_NIC_ISR4:
-                                * wico interrupt */
-       u32 isr_pref;           /* isr status register LMPM_NIC_PREF_STAT */
-       u32 wait_event;         /* wait event() caller address */
-       u32 l2p_control;        /* L2pControlField */
-       u32 l2p_duration;       /* L2pDurationField */
-       u32 l2p_mhvalid;        /* L2pMhValidBits */
-       u32 l2p_addr_match;     /* L2pAddrMatchStat */
-       u32 lmpm_pmg_sel;       /* indicate which clocks are turned on
-                                * (LMPM_PMG_SEL) */
-       u32 u_timestamp;        /* indicate when the date and time of the
-                                * compilation */
-       u32 flow_handler;       /* FH read/write pointers, RX credit */
-} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
-
-struct iwl_error_event_table {
-       u32 valid;              /* (nonzero) valid, (0) log is empty */
-       u32 error_id;           /* type of error */
-       u32 trm_hw_status0;     /* TRM HW status */
-       u32 trm_hw_status1;     /* TRM HW status */
-       u32 blink2;             /* branch link */
-       u32 ilink1;             /* interrupt link */
-       u32 ilink2;             /* interrupt link */
-       u32 data1;              /* error-specific data */
-       u32 data2;              /* error-specific data */
-       u32 data3;              /* error-specific data */
-       u32 bcon_time;          /* beacon timer */
-       u32 tsf_low;            /* network timestamp function timer */
-       u32 tsf_hi;             /* network timestamp function timer */
-       u32 gp1;                /* GP1 timer register */
-       u32 gp2;                /* GP2 timer register */
-       u32 fw_rev_type;        /* firmware revision type */
-       u32 major;              /* uCode version major */
-       u32 minor;              /* uCode version minor */
-       u32 hw_ver;             /* HW Silicon version */
-       u32 brd_ver;            /* HW board version */
-       u32 log_pc;             /* log program counter */
-       u32 frame_ptr;          /* frame pointer */
-       u32 stack_ptr;          /* stack pointer */
-       u32 hcmd;               /* last host command header */
-       u32 isr0;               /* isr status register LMPM_NIC_ISR0:
-                                * rxtx_flag */
-       u32 isr1;               /* isr status register LMPM_NIC_ISR1:
-                                * host_flag */
-       u32 isr2;               /* isr status register LMPM_NIC_ISR2:
-                                * enc_flag */
-       u32 isr3;               /* isr status register LMPM_NIC_ISR3:
-                                * time_flag */
-       u32 isr4;               /* isr status register LMPM_NIC_ISR4:
-                                * wico interrupt */
-       u32 last_cmd_id;        /* last HCMD id handled by the firmware */
-       u32 wait_event;         /* wait event() caller address */
-       u32 l2p_control;        /* L2pControlField */
-       u32 l2p_duration;       /* L2pDurationField */
-       u32 l2p_mhvalid;        /* L2pMhValidBits */
-       u32 l2p_addr_match;     /* L2pAddrMatchStat */
-       u32 lmpm_pmg_sel;       /* indicate which clocks are turned on
-                                * (LMPM_PMG_SEL) */
-       u32 u_timestamp;        /* indicate when the date and time of the
-                                * compilation */
-       u32 flow_handler;       /* FH read/write pointers, RX credit */
-} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
-
-/*
- * UMAC error struct - relevant starting from family 8000 chip.
- * Note: This structure is read from the device with IO accesses,
- * and the reading already does the endian conversion. As it is
- * read with u32-sized accesses, any members with a different size
- * need to be ordered correctly though!
- */
-struct iwl_umac_error_event_table {
-       u32 valid;              /* (nonzero) valid, (0) log is empty */
-       u32 error_id;           /* type of error */
-       u32 blink1;             /* branch link */
-       u32 blink2;             /* branch link */
-       u32 ilink1;             /* interrupt link */
-       u32 ilink2;             /* interrupt link */
-       u32 data1;              /* error-specific data */
-       u32 data2;              /* error-specific data */
-       u32 data3;              /* error-specific data */
-       u32 umac_major;
-       u32 umac_minor;
-       u32 frame_pointer;      /* core register 27*/
-       u32 stack_pointer;      /* core register 28 */
-       u32 cmd_header;         /* latest host cmd sent to UMAC */
-       u32 nic_isr_pref;       /* ISR status register */
-} __packed;
-
-#define ERROR_START_OFFSET  (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
-
-static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
-{
-       struct iwl_trans *trans = mvm->trans;
-       struct iwl_umac_error_event_table table = {};
-       u32 base = mvm->trans->dbg.umac_error_event_table;
-
-       if (!base &&
-           !(mvm->trans->dbg.error_event_table_tlv_status &
-             IWL_ERROR_EVENT_TABLE_UMAC))
-               return;
-
-       iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
-
-       if (table.valid)
-               mvm->fwrt.dump.umac_err_id = table.error_id;
-
-       if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
-               IWL_ERR(trans, "Start IWL Error Log Dump:\n");
-               IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
-                       mvm->status, table.valid);
-       }
-
-       IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
-               iwl_fw_lookup_assert_desc(table.error_id));
-       IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
-       IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
-       IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
-       IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
-       IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
-       IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
-       IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
-       IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
-       IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
-       IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
-       IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
-       IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
-       IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
-}
-
-static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num)
-{
-       struct iwl_trans *trans = mvm->trans;
-       struct iwl_error_event_table table = {};
-       u32 val, base = mvm->trans->dbg.lmac_error_event_table[lmac_num];
-
-       if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) {
-               if (!base)
-                       base = mvm->fw->init_errlog_ptr;
-       } else {
-               if (!base)
-                       base = mvm->fw->inst_errlog_ptr;
-       }
-
-       if (base < 0x400000) {
-               IWL_ERR(mvm,
-                       "Not valid error log pointer 0x%08X for %s uCode\n",
-                       base,
-                       (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT)
-                       ? "Init" : "RT");
-               return;
-       }
-
-       /* check if there is a HW error */
-       val = iwl_trans_read_mem32(trans, base);
-       if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
-               int err;
-
-               IWL_ERR(trans, "HW error, resetting before reading\n");
-
-               /* reset the device */
-               iwl_trans_sw_reset(trans);
-
-               err = iwl_finish_nic_init(trans, trans->trans_cfg);
-               if (err)
-                       return;
-       }
-
-       iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
-
-       if (table.valid)
-               mvm->fwrt.dump.lmac_err_id[lmac_num] = table.error_id;
-
-       if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
-               IWL_ERR(trans, "Start IWL Error Log Dump:\n");
-               IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
-                       mvm->status, table.valid);
-       }
-
-       /* Do not change this output - scripts rely on it */
-
-       IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
-
-       IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
-               iwl_fw_lookup_assert_desc(table.error_id));
-       IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
-       IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
-       IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
-       IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
-       IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
-       IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
-       IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
-       IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
-       IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
-       IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
-       IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
-       IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
-       IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
-       IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type);
-       IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
-       IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
-       IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
-       IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
-       IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
-       IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
-       IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
-       IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
-       IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
-       IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
-       IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id);
-       IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
-       IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
-       IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
-       IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
-       IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
-       IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
-       IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
-       IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
-}
-
-static void iwl_mvm_dump_iml_error_log(struct iwl_mvm *mvm)
-{
-       struct iwl_trans *trans = mvm->trans;
-       u32 error, data1;
-
-       if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
-               error = UMAG_SB_CPU_2_STATUS;
-               data1 = UMAG_SB_CPU_1_STATUS;
-       } else if (mvm->trans->trans_cfg->device_family >=
-                  IWL_DEVICE_FAMILY_8000) {
-               error = SB_CPU_2_STATUS;
-               data1 = SB_CPU_1_STATUS;
-       } else {
-               return;
-       }
-
-       error = iwl_read_umac_prph(trans, UMAG_SB_CPU_2_STATUS);
-
-       IWL_ERR(trans, "IML/ROM dump:\n");
-
-       if (error & 0xFFFF0000)
-               IWL_ERR(trans, "0x%04X | IML/ROM SYSASSERT\n", error >> 16);
-
-       IWL_ERR(mvm, "0x%08X | IML/ROM error/state\n", error);
-       IWL_ERR(mvm, "0x%08X | IML/ROM data1\n",
-               iwl_read_umac_prph(trans, data1));
-
-       if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
-               IWL_ERR(mvm, "0x%08X | IML/ROM WFPM_AUTH_KEY_0\n",
-                       iwl_read_umac_prph(trans, SB_MODIFY_CFG_FLAG));
-}
-
-void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
-{
-       if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
-               IWL_ERR(mvm,
-                       "DEVICE_ENABLED bit is not set. Aborting dump.\n");
-               return;
-       }
-
-       iwl_mvm_dump_lmac_error_log(mvm, 0);
-
-       if (mvm->trans->dbg.lmac_error_event_table[1])
-               iwl_mvm_dump_lmac_error_log(mvm, 1);
-
-       iwl_mvm_dump_umac_error_log(mvm);
-
-       iwl_mvm_dump_iml_error_log(mvm);
-
-       iwl_fw_error_print_fseq_regs(&mvm->fwrt);
-}
-
 int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
                         int tid, int frame_limit, u16 ssn)
 {
@@ -621,7 +311,7 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                         enum ieee80211_smps_mode smps_request)
 {
        struct iwl_mvm_vif *mvmvif;
-       enum ieee80211_smps_mode smps_mode;
+       enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
        int i;
 
        lockdep_assert_held(&mvm->mutex);
@@ -630,10 +320,8 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
                return;
 
-       if (vif->type == NL80211_IFTYPE_AP)
-               smps_mode = IEEE80211_SMPS_OFF;
-       else
-               smps_mode = IEEE80211_SMPS_AUTOMATIC;
+       if (vif->type != NL80211_IFTYPE_STATION)
+               return;
 
        mvmvif = iwl_mvm_vif_from_mac80211(vif);
        mvmvif->smps_requests[req_type] = smps_request;
@@ -683,23 +371,37 @@ void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
        mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
 }
 
+struct iwl_mvm_diversity_iter_data {
+       struct iwl_mvm_phy_ctxt *ctxt;
+       bool result;
+};
+
 static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
                                   struct ieee80211_vif *vif)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       bool *result = _data;
+       struct iwl_mvm_diversity_iter_data *data = _data;
        int i;
 
+       if (mvmvif->phy_ctxt != data->ctxt)
+               return;
+
        for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
                if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
-                   mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
-                       *result = false;
+                   mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) {
+                       data->result = false;
+                       break;
+               }
        }
 }
 
-bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
+bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
+                                 struct iwl_mvm_phy_ctxt *ctxt)
 {
-       bool result = true;
+       struct iwl_mvm_diversity_iter_data data = {
+               .ctxt = ctxt,
+               .result = true,
+       };
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -711,9 +413,9 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
 
        ieee80211_iterate_active_interfaces_atomic(
                        mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                       iwl_mvm_diversity_iter, &result);
+                       iwl_mvm_diversity_iter, &data);
 
-       return result;
+       return data.result;
 }
 
 void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
@@ -1398,7 +1100,8 @@ u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
        return iwl_read_prph(mvm->trans, reg_addr);
 }
 
-void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
+void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type,
+                          u32 *gp2, u64 *boottime, ktime_t *realtime)
 {
        bool ps_disabled;
 
@@ -1412,7 +1115,11 @@ void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
        }
 
        *gp2 = iwl_mvm_get_systime(mvm);
-       *boottime = ktime_get_boottime_ns();
+
+       if (clock_type == CLOCK_BOOTTIME && boottime)
+               *boottime = ktime_get_boottime_ns();
+       else if (clock_type == CLOCK_REALTIME && realtime)
+               *realtime = ktime_get_real();
 
        if (!ps_disabled) {
                mvm->ps_disabled = ps_disabled;
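
Usage sketch for the widened helper above; the CLOCK_BOOTTIME call matches the FTM hunk earlier in this diff, while the CLOCK_REALTIME variant is the newly enabled option (unused output pointers may be NULL):

u32 gp2;
u64 boottime_ns;
ktime_t rt;

/* existing behaviour: GP2 sampled together with CLOCK_BOOTTIME */
iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &gp2, &boottime_ns, NULL);

/* new option: GP2 sampled together with wall-clock time */
iwl_mvm_get_sync_time(mvm, CLOCK_REALTIME, &gp2, NULL, &rt);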
index cecc32e..239a722 100644 (file)
@@ -79,7 +79,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
        struct iwl_prph_scratch *prph_scratch;
        struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
        struct iwl_prph_info *prph_info;
-       void *iml_img;
        u32 control_flags = 0;
        int ret;
        int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
@@ -138,8 +137,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
 
        /* Allocate prph information
         * currently we don't assign to the prph info anything, but it would get
-        * assigned later */
-       prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
+        * assigned later
+        *
+        * We also use the second half of this page to give the device some
+        * dummy TR/CR tail pointers - which shouldn't be necessary as we don't
+        * use this, but the hardware still reads/writes there and we can't let
+        * it go do that with a NULL pointer.
+        */
+       BUILD_BUG_ON(sizeof(*prph_info) > PAGE_SIZE / 2);
+       prph_info = dma_alloc_coherent(trans->dev, PAGE_SIZE,
                                       &trans_pcie->prph_info_dma_addr,
                                       GFP_KERNEL);
        if (!prph_info) {
@@ -166,13 +172,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
        ctxt_info_gen3->cr_head_idx_arr_base_addr =
                cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
        ctxt_info_gen3->tr_tail_idx_arr_base_addr =
-               cpu_to_le64(trans_pcie->rxq->tr_tail_dma);
+               cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2);
        ctxt_info_gen3->cr_tail_idx_arr_base_addr =
-               cpu_to_le64(trans_pcie->rxq->cr_tail_dma);
-       ctxt_info_gen3->cr_idx_arr_size =
-               cpu_to_le16(IWL_NUM_OF_COMPLETION_RINGS);
-       ctxt_info_gen3->tr_idx_arr_size =
-               cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
+               cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
        ctxt_info_gen3->mtr_base_addr =
                cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
        ctxt_info_gen3->mcr_base_addr =
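
For reference, the single-page allocation above ends up partitioned like this (offsets as encoded in the two tail-pointer writes in this hunk):

/*
 * prph_info page layout after this change:
 *
 *   [0, PAGE_SIZE/2)              struct iwl_prph_info (real contents)
 *   [PAGE_SIZE/2, 3*PAGE_SIZE/4)  dummy TR tail index array
 *   [3*PAGE_SIZE/4, PAGE_SIZE)    dummy CR tail index array
 */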
@@ -187,14 +189,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
        trans_pcie->prph_scratch = prph_scratch;
 
        /* Allocate IML */
-       iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
-                                    &trans_pcie->iml_dma_addr, GFP_KERNEL);
-       if (!iml_img) {
+       trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len,
+                                            &trans_pcie->iml_dma_addr,
+                                            GFP_KERNEL);
+       if (!trans_pcie->iml) {
                ret = -ENOMEM;
                goto err_free_ctxt_info;
        }
 
-       memcpy(iml_img, trans->iml, trans->iml_len);
+       memcpy(trans_pcie->iml, trans->iml, trans->iml_len);
 
        iwl_enable_fw_load_int_ctx_info(trans);
 
@@ -216,10 +219,8 @@ err_free_ctxt_info:
                          trans_pcie->ctxt_info_dma_addr);
        trans_pcie->ctxt_info_gen3 = NULL;
 err_free_prph_info:
-       dma_free_coherent(trans->dev,
-                         sizeof(*prph_info),
-                       prph_info,
-                       trans_pcie->prph_info_dma_addr);
+       dma_free_coherent(trans->dev, PAGE_SIZE, prph_info,
+                         trans_pcie->prph_info_dma_addr);
 
 err_free_prph_scratch:
        dma_free_coherent(trans->dev,
@@ -230,29 +231,40 @@ err_free_prph_scratch:
 
 }
 
-void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
+void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
+       if (trans_pcie->iml) {
+               dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml,
+                                 trans_pcie->iml_dma_addr);
+               trans_pcie->iml_dma_addr = 0;
+               trans_pcie->iml = NULL;
+       }
+
+       iwl_pcie_ctxt_info_free_fw_img(trans);
+
+       if (alive)
+               return;
+
        if (!trans_pcie->ctxt_info_gen3)
                return;
 
+       /* ctxt_info_gen3 and prph_scratch are still needed for PNVM load */
        dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
                          trans_pcie->ctxt_info_gen3,
                          trans_pcie->ctxt_info_dma_addr);
        trans_pcie->ctxt_info_dma_addr = 0;
        trans_pcie->ctxt_info_gen3 = NULL;
 
-       iwl_pcie_ctxt_info_free_fw_img(trans);
-
        dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
                          trans_pcie->prph_scratch,
                          trans_pcie->prph_scratch_dma_addr);
        trans_pcie->prph_scratch_dma_addr = 0;
        trans_pcie->prph_scratch = NULL;
 
-       dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_info),
-                         trans_pcie->prph_info,
+       /* this is needed for the entire lifetime */
+       dma_free_coherent(trans->dev, PAGE_SIZE, trans_pcie->prph_info,
                          trans_pcie->prph_info_dma_addr);
        trans_pcie->prph_info_dma_addr = 0;
        trans_pcie->prph_info = NULL;
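
Call-site sketch for the new alive parameter (the callers live outside this hunk): with alive set, only the IML copy and the firmware image are dropped, since the context info and PRPH buffers must stay mapped for the PNVM load and, in the case of prph_info, for the device's whole lifetime:

/* after the ALIVE handshake: free only what the device no longer reads */
iwl_pcie_ctxt_info_gen3_free(trans, true);

/* on full teardown: release everything, including prph_info */
iwl_pcie_ctxt_info_gen3_free(trans, false);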
@@ -290,3 +302,37 @@ int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
 
        return 0;
 }
+
+int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
+                                                 const void *data, u32 len)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
+               &trans_pcie->prph_scratch->ctrl_cfg;
+       int ret;
+
+       if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+               return 0;
+
+       /* only allocate the DRAM if not allocated yet */
+       if (!trans->reduce_power_loaded) {
+               if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size))
+                       return -EBUSY;
+
+               ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
+                                          &trans_pcie->reduce_power_dram);
+               if (ret < 0) {
+                       IWL_DEBUG_FW(trans,
+                                    "Failed to allocate reduce power DMA %d.\n",
+                                    ret);
+                       return ret;
+               }
+       }
+
+       prph_sc_ctrl->reduce_power_cfg.base_addr =
+               cpu_to_le64(trans_pcie->reduce_power_dram.physical);
+       prph_sc_ctrl->reduce_power_cfg.size =
+               cpu_to_le32(trans_pcie->reduce_power_dram.size);
+
+       return 0;
+}
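
A hedged sketch of the expected caller, mirroring the PNVM flow handled just above this function: the DRAM region is allocated once, and a flag on the transport (reduce_power_loaded, as tested in the hunk) guards against re-allocation. fw_data/fw_len are assumed to come from the firmware file:

/* hypothetical call site */
ret = iwl_trans_pcie_ctx_info_gen3_set_reduce_power(trans, fw_data, fw_len);
if (!ret)
        trans->reduce_power_loaded = true;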
index d94bd8d..16baee3 100644 (file)
@@ -532,6 +532,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
        IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
        IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
        IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
+       IWL_DEV_INFO(0x51F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
+       IWL_DEV_INFO(0x51F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name),
 
        IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name),
 
@@ -1030,6 +1032,11 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
                      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
                      iwl_cfg_ma_a0_mr_a0, iwl_ax221_name),
        _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
+                     IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+                     iwl_cfg_ma_a0_fm_a0, iwl_ax231_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
                      IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
                      IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
                      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
@@ -1209,14 +1216,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (cfg == &iwlax210_2ax_cfg_so_hr_a0) {
                if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_TY) {
                        iwl_trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0;
-               } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
-                          CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
+               } else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
+                          CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF)) {
                        iwl_trans->cfg = &iwlax210_2ax_cfg_so_jf_b0;
-               } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
-                          CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
+               } else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
+                          CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF)) {
                        iwl_trans->cfg = &iwlax211_2ax_cfg_so_gf_a0;
-               } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
-                          CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
+               } else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
+                          CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF4)) {
                        iwl_trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
                }
        }
index 76a512c..cc550f6 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2003-2015, 2018-2020 Intel Corporation
+ * Copyright (C) 2003-2015, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -109,12 +109,8 @@ struct iwl_rx_completion_desc {
  *     Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
  *     In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
- * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
- * @tr_tail: driver's pointer to the transmission ring tail buffer
- * @tr_tail_dma: physical address of the buffer for the transmission ring tail
- * @cr_tail: driver's pointer to the completion ring tail buffer
- * @cr_tail_dma: physical address of the buffer for the completion ring tail
+ * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
+ * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
@@ -142,10 +138,6 @@ struct iwl_rxq {
                struct iwl_rx_completion_desc *cd;
        };
        dma_addr_t used_bd_dma;
-       __le16 *tr_tail;
-       dma_addr_t tr_tail_dma;
-       __le16 *cr_tail;
-       dma_addr_t cr_tail_dma;
        u32 read;
        u32 write;
        u32 free_count;
@@ -279,6 +271,8 @@ struct cont_rec {
  *     Context information addresses will be taken from here.
  *     This is driver's local copy for keeping track of size and
  *     count for allocating and freeing the memory.
+ * @iml: image loader image virtual address
+ * @iml_dma_addr: image loader image DMA address
  * @trans: pointer to the generic transport area
  * @scd_base_addr: scheduler sram base address in SRAM
  * @kw: keep warm address
@@ -317,6 +311,7 @@ struct cont_rec {
  * @alloc_page_lock: spinlock for the page allocator
  * @alloc_page: allocated page to still use parts of
  * @alloc_page_used: how much of the allocated page was already used (bytes)
+ * @rf_name: name/version of the CRF, if any
  */
 struct iwl_trans_pcie {
        struct iwl_rxq *rxq;
@@ -329,6 +324,7 @@ struct iwl_trans_pcie {
        };
        struct iwl_prph_info *prph_info;
        struct iwl_prph_scratch *prph_scratch;
+       void *iml;
        dma_addr_t ctxt_info_dma_addr;
        dma_addr_t prph_info_dma_addr;
        dma_addr_t prph_scratch_dma_addr;
@@ -353,6 +349,7 @@ struct iwl_trans_pcie {
        struct iwl_dma_ptr kw;
 
        struct iwl_dram_data pnvm_dram;
+       struct iwl_dram_data reduce_power_dram;
 
        struct iwl_txq *txq_memory;
 
@@ -409,6 +406,8 @@ struct iwl_trans_pcie {
        bool fw_reset_handshake;
        bool fw_reset_done;
        wait_queue_head_t fw_reset_waitq;
+
+       char rf_name[32];
 };
 
 static inline struct iwl_trans_pcie *
@@ -530,9 +529,6 @@ static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
        IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
 }
 
-#define IWL_NUM_OF_COMPLETION_RINGS    31
-#define IWL_NUM_OF_TRANSFER_RINGS      527
-
 static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
                                            int start)
 {
index fb84914..4f6f4b2 100644 (file)
@@ -663,7 +663,6 @@ static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
 static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
                                  struct iwl_rxq *rxq)
 {
-       struct device *dev = trans->dev;
        bool use_rx_td = (trans->trans_cfg->device_family >=
                          IWL_DEVICE_FAMILY_AX210);
        int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
@@ -685,21 +684,6 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
                                  rxq->used_bd, rxq->used_bd_dma);
        rxq->used_bd_dma = 0;
        rxq->used_bd = NULL;
-
-       if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
-               return;
-
-       if (rxq->tr_tail)
-               dma_free_coherent(dev, sizeof(__le16),
-                                 rxq->tr_tail, rxq->tr_tail_dma);
-       rxq->tr_tail_dma = 0;
-       rxq->tr_tail = NULL;
-
-       if (rxq->cr_tail)
-               dma_free_coherent(dev, sizeof(__le16),
-                                 rxq->cr_tail, rxq->cr_tail_dma);
-       rxq->cr_tail_dma = 0;
-       rxq->cr_tail = NULL;
 }
 
 static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
@@ -744,21 +728,6 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
        rxq->rb_stts_dma =
                trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
 
-       if (!use_rx_td)
-               return 0;
-
-       /* Allocate the driver's pointer to TR tail */
-       rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
-                                         &rxq->tr_tail_dma, GFP_KERNEL);
-       if (!rxq->tr_tail)
-               goto err;
-
-       /* Allocate the driver's pointer to CR tail */
-       rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
-                                         &rxq->cr_tail_dma, GFP_KERNEL);
-       if (!rxq->cr_tail)
-               goto err;
-
        return 0;
 
 err:
@@ -1590,9 +1559,6 @@ restart:
 out:
        /* Backtrack one entry */
        rxq->read = i;
-       /* update cr tail with the rxq read pointer */
-       if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
-               *rxq->cr_tail = cpu_to_le16(r);
        spin_unlock(&rxq->lock);
 
        /*
index 1bcd36e..a340093 100644 (file)
@@ -149,7 +149,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
 
        iwl_pcie_ctxt_info_free_paging(trans);
        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
-               iwl_pcie_ctxt_info_gen3_free(trans);
+               iwl_pcie_ctxt_info_gen3_free(trans, false);
        else
                iwl_pcie_ctxt_info_free(trans);
 
@@ -240,6 +240,75 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
        return 0;
 }
 
+static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       char *buf = trans_pcie->rf_name;
+       size_t buflen = sizeof(trans_pcie->rf_name);
+       size_t pos;
+       u32 version;
+
+       if (buf[0])
+               return;
+
+       switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+       case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF):
+               pos = scnprintf(buf, buflen, "JF");
+               break;
+       case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF):
+               pos = scnprintf(buf, buflen, "GF");
+               break;
+       case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF4):
+               pos = scnprintf(buf, buflen, "GF4");
+               break;
+       case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR):
+               pos = scnprintf(buf, buflen, "HR");
+               break;
+       case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1):
+               pos = scnprintf(buf, buflen, "HR1");
+               break;
+       case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):
+               pos = scnprintf(buf, buflen, "HRCDB");
+               break;
+       default:
+               return;
+       }
+
+       switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+       case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR):
+       case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1):
+       case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):
+               version = iwl_read_prph(trans, CNVI_MBOX_C);
+               switch (version) {
+               case 0x20000:
+                       pos += scnprintf(buf + pos, buflen - pos, " B3");
+                       break;
+               case 0x120000:
+                       pos += scnprintf(buf + pos, buflen - pos, " B5");
+                       break;
+               default:
+                       pos += scnprintf(buf + pos, buflen - pos,
+                                        " (0x%x)", version);
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+
+       pos += scnprintf(buf + pos, buflen - pos, ", rfid=0x%x",
+                        trans->hw_rf_id);
+
+       IWL_INFO(trans, "Detected RF %s\n", buf);
+
+       /*
+        * also add a \n for debugfs - need to do it after printing
+        * since our IWL_INFO machinery wants to see a static \n at
+        * the end of the string
+        */
+       pos += scnprintf(buf + pos, buflen - pos, "\n");
+}
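iwl_pcie_get_rf_name() builds its string incrementally with the usual kernel pos/buflen-pos idiom. A userspace sketch of the same pattern, using a small stand-in for the kernel's scnprintf(), which (unlike snprintf) returns the number of characters actually stored:

#include <stdarg.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's scnprintf(): never reports more
 * than the remaining space, so "pos += ..." cannot run past the buffer. */
static size_t my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
        va_list args;
        int n;

        if (!size)
                return 0;
        va_start(args, fmt);
        n = vsnprintf(buf, size, fmt, args);
        va_end(args);
        if (n < 0)
                return 0;
        return (size_t)n < size ? (size_t)n : size - 1;
}

int main(void)
{
        char rf_name[32];
        size_t pos;

        pos  = my_scnprintf(rf_name, sizeof(rf_name), "GF4");
        pos += my_scnprintf(rf_name + pos, sizeof(rf_name) - pos,
                            ", rfid=0x%x", 0x2b);
        printf("%s (pos=%zu)\n", rf_name, pos);
        return 0;
}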
+
 void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -254,7 +323,10 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
        /* now that we got alive we can free the fw image & the context info.
         * paging memory cannot be freed, though, since the FW will still use it
         */
-       iwl_pcie_ctxt_info_free(trans);
+       if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+               iwl_pcie_ctxt_info_gen3_free(trans, true);
+       else
+               iwl_pcie_ctxt_info_free(trans);
 
        /*
         * Re-enable all the interrupts, including the RF-Kill one, now that
@@ -263,6 +335,8 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
        iwl_enable_interrupts(trans);
        mutex_lock(&trans_pcie->mutex);
        iwl_pcie_check_hw_rf_kill(trans);
+
+       iwl_pcie_get_rf_name(trans);
        mutex_unlock(&trans_pcie->mutex);
 }
 
index 239bc17..bee6b45 100644 (file)
@@ -1648,7 +1648,7 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
                if (ret)
                        IWL_ERR(trans_pcie->trans,
                                "Failed to set affinity mask for IRQ %d\n",
-                               i);
+                               trans_pcie->msix_entries[i].vector);
        }
 }
 
@@ -1943,6 +1943,12 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
                                  trans_pcie->pnvm_dram.block,
                                  trans_pcie->pnvm_dram.physical);
 
+       if (trans_pcie->reduce_power_dram.size)
+               dma_free_coherent(trans->dev,
+                                 trans_pcie->reduce_power_dram.size,
+                                 trans_pcie->reduce_power_dram.block,
+                                 trans_pcie->reduce_power_dram.physical);
+
        mutex_destroy(&trans_pcie->mutex);
        iwl_trans_free(trans);
 }
@@ -2848,11 +2854,28 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
        return bytes_copied;
 }
 
+static ssize_t iwl_dbgfs_rf_read(struct file *file,
+                                char __user *user_buf,
+                                size_t count, loff_t *ppos)
+{
+       struct iwl_trans *trans = file->private_data;
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       if (!trans_pcie->rf_name[0])
+               return -ENODEV;
+
+       return simple_read_from_buffer(user_buf, count, ppos,
+                                      trans_pcie->rf_name,
+                                      strlen(trans_pcie->rf_name));
+}
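The new debugfs handler leans on simple_read_from_buffer() to honour the caller's offset and count, so repeated short reads walk through the rf_name string and end with a clean EOF. A userspace sketch of that contract (the kernel helper additionally copies to user space):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t read_from_buffer(char *to, size_t count, long *ppos,
                                const char *from, size_t available)
{
        size_t pos = (size_t)*ppos;
        size_t n;

        if (pos >= available)
                return 0;               /* EOF once the string is consumed */
        n = available - pos;
        if (n > count)
                n = count;
        memcpy(to, from + pos, n);
        *ppos += (long)n;
        return (ssize_t)n;
}

int main(void)
{
        const char rf_name[] = "GF4, rfid=0x2b\n";
        char chunk[5];
        long pos = 0;
        ssize_t n;

        while ((n = read_from_buffer(chunk, 4, &pos,
                                     rf_name, strlen(rf_name))) > 0) {
                chunk[n] = '\0';
                printf("read %zd: \"%s\"\n", n, chunk);
        }
        return 0;
}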
+
 DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
 DEBUGFS_READ_FILE_OPS(fh_reg);
 DEBUGFS_READ_FILE_OPS(rx_queue);
 DEBUGFS_WRITE_FILE_OPS(csr);
 DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
+DEBUGFS_READ_FILE_OPS(rf);
+
 static const struct file_operations iwl_dbgfs_tx_queue_ops = {
        .owner = THIS_MODULE,
        .open = iwl_dbgfs_tx_queue_open,
@@ -2879,6 +2902,7 @@ void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
        DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
        DEBUGFS_ADD_FILE(rfkill, dir, 0600);
        DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
+       DEBUGFS_ADD_FILE(rf, dir, 0400);
 }
 
 static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
@@ -3400,6 +3424,7 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
        .wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
        .rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
        .set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
+       .set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power,
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
 #endif
@@ -3413,6 +3438,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        struct iwl_trans *trans;
        int ret, addr_size;
        const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
+       void __iomem * const *table;
 
        if (!cfg_trans->gen2)
                ops = &trans_ops_pcie;
@@ -3485,9 +3511,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                goto out_no_pci;
        }
 
-       trans_pcie->hw_base = pcim_iomap_table(pdev)[0];
-       if (!trans_pcie->hw_base) {
+       table = pcim_iomap_table(pdev);
+       if (!table) {
                dev_err(&pdev->dev, "pcim_iomap_table failed\n");
+               ret = -ENOMEM;
+               goto out_no_pci;
+       }
+
+       trans_pcie->hw_base = table[0];
+       if (!trans_pcie->hw_base) {
+               dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n");
                ret = -ENODEV;
                goto out_no_pci;
        }
index 2c7adb4..0aea35c 100644 (file)
@@ -988,15 +988,18 @@ int __orinoco_hw_setup_enc(struct orinoco_private *priv)
  * tsc must be NULL or up to 8 bytes
  */
 int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
-                             int set_tx, const u8 *key, const u8 *rsc,
-                             size_t rsc_len, const u8 *tsc, size_t tsc_len)
+                             int set_tx, const u8 *key, size_t key_len,
+                             const u8 *rsc, size_t rsc_len,
+                             const u8 *tsc, size_t tsc_len)
 {
        struct {
                __le16 idx;
                u8 rsc[ORINOCO_SEQ_LEN];
-               u8 key[TKIP_KEYLEN];
-               u8 tx_mic[MIC_KEYLEN];
-               u8 rx_mic[MIC_KEYLEN];
+               struct {
+                       u8 key[TKIP_KEYLEN];
+                       u8 tx_mic[MIC_KEYLEN];
+                       u8 rx_mic[MIC_KEYLEN];
+               } tkip;
                u8 tsc[ORINOCO_SEQ_LEN];
        } __packed buf;
        struct hermes *hw = &priv->hw;
@@ -1011,8 +1014,9 @@ int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
                key_idx |= 0x8000;
 
        buf.idx = cpu_to_le16(key_idx);
-       memcpy(buf.key, key,
-              sizeof(buf.key) + sizeof(buf.tx_mic) + sizeof(buf.rx_mic));
+       if (key_len != sizeof(buf.tkip))
+               return -EINVAL;
+       memcpy(&buf.tkip, key, sizeof(buf.tkip));
 
        if (rsc_len > sizeof(buf.rsc))
                rsc_len = sizeof(buf.rsc);
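The hunk above replaces a single memcpy() that spanned three adjacent fields (key, tx_mic, rx_mic) with a copy into one named sub-struct, and validates the caller's key_len against it. A compile-and-run sketch of why the nested struct is safer; the field sizes mirror TKIP_KEYLEN/MIC_KEYLEN, the rest is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TKIP_KEYLEN 16
#define MIC_KEYLEN  8

struct tkip_cmd {
        uint16_t idx;
        uint8_t rsc[8];
        struct {                        /* one named 32-byte block ... */
                uint8_t key[TKIP_KEYLEN];
                uint8_t tx_mic[MIC_KEYLEN];
                uint8_t rx_mic[MIC_KEYLEN];
        } tkip;
        uint8_t tsc[8];
} __attribute__((packed));

static int set_tkip_key(struct tkip_cmd *cmd, const uint8_t *key, size_t len)
{
        if (len != sizeof(cmd->tkip))   /* ... so the length check is exact */
                return -1;
        memcpy(&cmd->tkip, key, sizeof(cmd->tkip));
        return 0;
}

int main(void)
{
        struct tkip_cmd cmd;
        uint8_t key[TKIP_KEYLEN + 2 * MIC_KEYLEN] = { 0xaa };

        printf("tkip block: %zu bytes\n", sizeof(cmd.tkip));    /* 32 */
        return set_tkip_key(&cmd, key, sizeof(key)) ? 1 : 0;
}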
index 466d1ed..da5804d 100644 (file)
@@ -38,8 +38,9 @@ int __orinoco_hw_set_wap(struct orinoco_private *priv);
 int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv);
 int __orinoco_hw_setup_enc(struct orinoco_private *priv);
 int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
-                             int set_tx, const u8 *key, const u8 *rsc,
-                             size_t rsc_len, const u8 *tsc, size_t tsc_len);
+                             int set_tx, const u8 *key, size_t key_len,
+                             const u8 *rsc, size_t rsc_len,
+                             const u8 *tsc, size_t tsc_len);
 int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx);
 int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
                                    struct net_device *dev,
index 7b6c4ae..4a01260 100644 (file)
@@ -791,7 +791,7 @@ static int orinoco_ioctl_set_encodeext(struct net_device *dev,
 
                        err = __orinoco_hw_set_tkip_key(priv, idx,
                                 ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
-                                priv->keys[idx].key,
+                                priv->keys[idx].key, priv->keys[idx].key_len,
                                 tkip_iv, ORINOCO_SEQ_LEN, NULL, 0);
                        if (err)
                                printk(KERN_ERR "%s: Error %d setting TKIP key"
index 51ce767..ffa894f 100644 (file)
@@ -626,6 +626,7 @@ struct mac80211_hwsim_data {
        u32 ciphers[ARRAY_SIZE(hwsim_ciphers)];
 
        struct mac_address addresses[2];
+       struct ieee80211_chanctx_conf *chanctx;
        int channels, idx;
        bool use_chanctx;
        bool destroy_on_close;
@@ -1257,7 +1258,8 @@ static inline u16 trans_tx_rate_flags_ieee2hwsim(struct ieee80211_tx_rate *rate)
 
 static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
                                       struct sk_buff *my_skb,
-                                      int dst_portid)
+                                      int dst_portid,
+                                      struct ieee80211_channel *channel)
 {
        struct sk_buff *skb;
        struct mac80211_hwsim_data *data = hw->priv;
@@ -1312,7 +1314,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
        if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags))
                goto nla_put_failure;
 
-       if (nla_put_u32(skb, HWSIM_ATTR_FREQ, data->channel->center_freq))
+       if (nla_put_u32(skb, HWSIM_ATTR_FREQ, channel->center_freq))
                goto nla_put_failure;
 
        /* We get the tx control (rate and retries) info */
@@ -1659,7 +1661,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
        _portid = READ_ONCE(data->wmediumd);
 
        if (_portid || hwsim_virtio_enabled)
-               return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
+               return mac80211_hwsim_tx_frame_nl(hw, skb, _portid, channel);
 
        /* NO wmediumd detected, perfect medium simulation */
        data->tx_pkts++;
@@ -1693,8 +1695,13 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
 static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
 {
        struct mac80211_hwsim_data *data = hw->priv;
+
        data->started = false;
        hrtimer_cancel(&data->beacon_timer);
+
+       while (!skb_queue_empty(&data->pending))
+               ieee80211_free_txskb(hw, skb_dequeue(&data->pending));
+
        wiphy_dbg(hw->wiphy, "%s\n", __func__);
 }
 
@@ -1770,8 +1777,10 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
        mac80211_hwsim_monitor_rx(hw, skb, chan);
 
        if (_pid || hwsim_virtio_enabled)
-               return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
+               return mac80211_hwsim_tx_frame_nl(hw, skb, _pid, chan);
 
+       data->tx_pkts++;
+       data->tx_bytes += skb->len;
        mac80211_hwsim_tx_frame_no_nl(hw, skb, chan);
        dev_kfree_skb(skb);
 }
@@ -2509,6 +2518,11 @@ static int mac80211_hwsim_croc(struct ieee80211_hw *hw,
 static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
                                      struct ieee80211_chanctx_conf *ctx)
 {
+       struct mac80211_hwsim_data *hwsim = hw->priv;
+
+       mutex_lock(&hwsim->mutex);
+       hwsim->chanctx = ctx;
+       mutex_unlock(&hwsim->mutex);
        hwsim_set_chanctx_magic(ctx);
        wiphy_dbg(hw->wiphy,
                  "add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
@@ -2520,6 +2534,11 @@ static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
 static void mac80211_hwsim_remove_chanctx(struct ieee80211_hw *hw,
                                          struct ieee80211_chanctx_conf *ctx)
 {
+       struct mac80211_hwsim_data *hwsim = hw->priv;
+
+       mutex_lock(&hwsim->mutex);
+       hwsim->chanctx = NULL;
+       mutex_unlock(&hwsim->mutex);
        wiphy_dbg(hw->wiphy,
                  "remove channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
                  ctx->def.chan->center_freq, ctx->def.width,
@@ -2532,6 +2551,11 @@ static void mac80211_hwsim_change_chanctx(struct ieee80211_hw *hw,
                                          struct ieee80211_chanctx_conf *ctx,
                                          u32 changed)
 {
+       struct mac80211_hwsim_data *hwsim = hw->priv;
+
+       mutex_lock(&hwsim->mutex);
+       hwsim->chanctx = ctx;
+       mutex_unlock(&hwsim->mutex);
        hwsim_check_chanctx_magic(ctx);
        wiphy_dbg(hw->wiphy,
                  "change channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
@@ -3124,6 +3148,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                hw->wiphy->max_remain_on_channel_duration = 1000;
                data->if_combination.radar_detect_widths = 0;
                data->if_combination.num_different_channels = data->channels;
+               data->chanctx = NULL;
        } else {
                data->if_combination.num_different_channels = 1;
                data->if_combination.radar_detect_widths =
@@ -3633,6 +3658,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
        int frame_data_len;
        void *frame_data;
        struct sk_buff *skb = NULL;
+       struct ieee80211_channel *channel = NULL;
 
        if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] ||
            !info->attrs[HWSIM_ATTR_FRAME] ||
@@ -3659,6 +3685,17 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
        if (!data2)
                goto out;
 
+       if (data2->use_chanctx) {
+               if (data2->tmp_chan)
+                       channel = data2->tmp_chan;
+               else if (data2->chanctx)
+                       channel = data2->chanctx->def.chan;
+       } else {
+               channel = data2->channel;
+       }
+       if (!channel)
+               goto out;
+
        if (!hwsim_virtio_enabled) {
                if (hwsim_net_get_netgroup(genl_info_net(info)) !=
                    data2->netgroup)
@@ -3670,7 +3707,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
 
        /* check if radio is configured properly */
 
-       if (data2->idle || !data2->started)
+       if ((data2->idle && !data2->tmp_chan) || !data2->started)
                goto out;
 
        /* A frame is received from user space */
@@ -3683,18 +3720,16 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
                mutex_lock(&data2->mutex);
                rx_status.freq = nla_get_u32(info->attrs[HWSIM_ATTR_FREQ]);
 
-               if (rx_status.freq != data2->channel->center_freq &&
-                   (!data2->tmp_chan ||
-                    rx_status.freq != data2->tmp_chan->center_freq)) {
+               if (rx_status.freq != channel->center_freq) {
                        mutex_unlock(&data2->mutex);
                        goto out;
                }
                mutex_unlock(&data2->mutex);
        } else {
-               rx_status.freq = data2->channel->center_freq;
+               rx_status.freq = channel->center_freq;
        }
 
-       rx_status.band = data2->channel->band;
+       rx_status.band = channel->band;
        rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
        rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
 
@@ -3791,11 +3826,6 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
                return -EINVAL;
        }
 
-       if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
-               GENL_SET_ERR_MSG(info, "too many channels specified");
-               return -EINVAL;
-       }
-
        if (info->attrs[HWSIM_ATTR_NO_VIF])
                param.no_vif = true;
 
index 470d669..2ff23ab 100644 (file)
@@ -995,6 +995,11 @@ struct host_cmd_ds_802_11_key_material {
        struct mwifiex_ie_type_key_param_set key_param_set;
 } __packed;
 
+struct host_cmd_ds_802_11_key_material_wep {
+       __le16 action;
+       struct mwifiex_ie_type_key_param_set key_param_set[NUM_WEP_KEYS];
+} __packed;
+
 struct host_cmd_ds_gen {
        __le16 command;
        __le16 size;
@@ -2347,6 +2352,7 @@ struct host_cmd_ds_command {
                struct host_cmd_ds_wmm_get_status get_wmm_status;
                struct host_cmd_ds_802_11_key_material key_material;
                struct host_cmd_ds_802_11_key_material_v2 key_material_v2;
+               struct host_cmd_ds_802_11_key_material_wep key_material_wep;
                struct host_cmd_ds_version_ext verext;
                struct host_cmd_ds_mgmt_frame_reg reg_mask;
                struct host_cmd_ds_remain_on_chan roc_cfg;
index d3a968e..48ea00d 100644 (file)
@@ -840,14 +840,15 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
        }
 
        if (!enc_key) {
-               memset(&key_material->key_param_set, 0,
-                      (NUM_WEP_KEYS *
-                       sizeof(struct mwifiex_ie_type_key_param_set)));
+               struct host_cmd_ds_802_11_key_material_wep *key_material_wep =
+                       (struct host_cmd_ds_802_11_key_material_wep *)key_material;
+               memset(key_material_wep->key_param_set, 0,
+                      sizeof(key_material_wep->key_param_set));
                ret = mwifiex_set_keyparamset_wep(priv,
-                                                 &key_material->key_param_set,
+                                                 &key_material_wep->key_param_set[0],
                                                  &key_param_len);
                cmd->size = cpu_to_le16(key_param_len +
-                                   sizeof(key_material->action) + S_DS_GEN);
+                                   sizeof(key_material_wep->action) + S_DS_GEN);
                return ret;
        } else
                memset(&key_material->key_param_set, 0,
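The old code memset() NUM_WEP_KEYS parameter sets through a command struct that only declares one, writing past the end of host_cmd_ds_802_11_key_material; the new host_cmd_ds_802_11_key_material_wep makes the four-slot layout explicit. A sketch of the size difference, with placeholder field contents:

#include <stdint.h>
#include <stdio.h>

#define NUM_WEP_KEYS 4

struct key_param_set {
        uint16_t type;
        uint16_t len;
        uint8_t body[26];               /* placeholder payload */
} __attribute__((packed));

struct cmd_key_material {               /* room for exactly one set */
        uint16_t action;
        struct key_param_set set;
} __attribute__((packed));

struct cmd_key_material_wep {           /* room for all four WEP keys */
        uint16_t action;
        struct key_param_set set[NUM_WEP_KEYS];
} __attribute__((packed));

int main(void)
{
        printf("one-key cmd: %zu bytes, wep cmd: %zu bytes\n",
               sizeof(struct cmd_key_material),
               sizeof(struct cmd_key_material_wep));
        return 0;
}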
index 84b32a5..3bf6571 100644 (file)
@@ -4552,7 +4552,7 @@ static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
        else
                rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5;
        legacy_rate_mask_to_array(p->legacy_rates, rates);
-       memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16);
+       memcpy(p->ht_rates, &sta->ht_cap.mcs, 16);
        p->interop = 1;
        p->amsdu_enabled = 0;
 
@@ -5034,7 +5034,7 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                        ap_legacy_rates =
                                ap->supp_rates[NL80211_BAND_5GHZ] << 5;
                }
-               memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
+               memcpy(ap_mcs_rates, &ap->ht_cap.mcs, 16);
 
                rcu_read_unlock();
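Both hunks in this file fix the same out-of-bounds read: ht_cap.mcs.rx_mask is only 10 bytes, while the firmware command wants the full 16-byte MCS block, so the copy now starts at &ht_cap.mcs. A sketch with the same field layout as ieee80211_mcs_info:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mcs_info {                       /* same shape as ieee80211_mcs_info */
        uint8_t  rx_mask[10];
        uint16_t rx_highest;
        uint8_t  tx_params;
        uint8_t  reserved[3];
} __attribute__((packed));

int main(void)
{
        struct mcs_info mcs = { .rx_mask = { 0xff, 0xff } };
        uint8_t ht_rates[16];

        /* memcpy(ht_rates, mcs.rx_mask, 16) would read 6 bytes past the
         * 10-byte array; copying from &mcs stays inside the 16-byte struct. */
        memcpy(ht_rates, &mcs, sizeof(ht_rates));
        printf("mcs_info is %zu bytes\n", sizeof(mcs));         /* 16 */
        return 0;
}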
 
index 72b1cc0..5e1c150 100644 (file)
@@ -191,6 +191,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 
        q->entry[idx].txwi = txwi;
        q->entry[idx].skb = skb;
+       q->entry[idx].wcid = 0xffff;
 
        return idx;
 }
@@ -349,6 +350,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
                      struct sk_buff *skb, struct mt76_wcid *wcid,
                      struct ieee80211_sta *sta)
 {
+       struct ieee80211_tx_status status = {
+               .sta = sta,
+       };
        struct mt76_tx_info tx_info = {
                .skb = skb,
        };
@@ -360,11 +364,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
        u8 *txwi;
 
        t = mt76_get_txwi(dev);
-       if (!t) {
-               hw = mt76_tx_status_get_hw(dev, skb);
-               ieee80211_free_txskb(hw, skb);
-               return -ENOMEM;
-       }
+       if (!t)
+               goto free_skb;
+
        txwi = mt76_get_txwi_ptr(dev, t);
 
        skb->prev = skb->next = NULL;
@@ -427,8 +429,13 @@ free:
        }
 #endif
 
-       dev_kfree_skb(tx_info.skb);
        mt76_put_txwi(dev, t);
+
+free_skb:
+       status.skb = tx_info.skb;
+       hw = mt76_tx_status_get_hw(dev, tx_info.skb);
+       ieee80211_tx_status_ext(hw, &status);
+
        return ret;
 }
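The reworked error path above no longer frees the skb directly: every failure funnels through one exit that reports the frame via ieee80211_tx_status_ext(), so mac80211's per-station accounting sees a completion even for frames that never reached the queue. The shape of that single-exit pattern, reduced to standalone C:

#include <errno.h>
#include <stdio.h>

struct tx_status { const char *sta; void *skb; };

static void tx_status_report(const struct tx_status *st)
{
        printf("completed skb %p for %s\n", st->skb, st->sta);
}

static int tx_queue_skb(void *skb, int no_descriptor)
{
        struct tx_status status = { .sta = "sta0" };
        int ret = 0;

        if (no_descriptor) {
                ret = -ENOMEM;
                goto free_skb;          /* report, don't silently free */
        }
        /* ... map buffers and write the hardware descriptor here ... */
        return 0;

free_skb:
        status.skb = skb;
        tx_status_report(&status);
        return ret;
}

int main(void)
{
        int skb;                        /* stand-in for a real sk_buff */

        return tx_queue_skb(&skb, 1) == -ENOMEM ? 0 : 1;
}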
 
index 03fe628..d03aedc 100644 (file)
@@ -83,6 +83,22 @@ static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
        { .throughput = 300 * 1024, .blink_time =  50 },
 };
 
+struct ieee80211_rate mt76_rates[] = {
+       CCK_RATE(0, 10),
+       CCK_RATE(1, 20),
+       CCK_RATE(2, 55),
+       CCK_RATE(3, 110),
+       OFDM_RATE(11, 60),
+       OFDM_RATE(15, 90),
+       OFDM_RATE(10, 120),
+       OFDM_RATE(14, 180),
+       OFDM_RATE(9,  240),
+       OFDM_RATE(13, 360),
+       OFDM_RATE(8,  480),
+       OFDM_RATE(12, 540),
+};
+EXPORT_SYMBOL_GPL(mt76_rates);
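The consolidated table relies on the CCK_RATE()/OFDM_RATE() encoding (moved into mt76.h, see the header hunk below) that packs the PHY mode into the high byte of hw_value and the hardware rate index into the low byte. A quick decode of that packing; the enum values here are illustrative, the driver uses mt76_phy_type:

#include <stdint.h>
#include <stdio.h>

enum { PHY_CCK = 0, PHY_OFDM = 1 };     /* illustrative mode values */

static void decode_hw_value(uint16_t hw_value)
{
        unsigned int mode = hw_value >> 8;
        unsigned int idx  = hw_value & 0xff;

        printf("%s, hw rate index %u\n",
               mode == PHY_CCK ? "CCK" : "OFDM", idx);
}

int main(void)
{
        decode_hw_value((PHY_OFDM << 8) | 11);  /* the 6 Mbit/s entry */
        return 0;
}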
+
 static int mt76_led_init(struct mt76_dev *dev)
 {
        struct device_node *np = dev->dev->of_node;
@@ -315,17 +331,6 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
        ieee80211_hw_set(hw, MFP_CAPABLE);
        ieee80211_hw_set(hw, AP_LINK_PS);
        ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
-
-       wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
-       wiphy->interface_modes =
-               BIT(NL80211_IFTYPE_STATION) |
-               BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
-               BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
-               BIT(NL80211_IFTYPE_P2P_CLIENT) |
-               BIT(NL80211_IFTYPE_P2P_GO) |
-               BIT(NL80211_IFTYPE_ADHOC);
 }
 
 struct mt76_phy *
@@ -346,6 +351,17 @@ mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
        phy->hw = hw;
        phy->priv = hw->priv + phy_size;
 
+       hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+       hw->wiphy->interface_modes =
+               BIT(NL80211_IFTYPE_STATION) |
+               BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+               BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+               BIT(NL80211_IFTYPE_P2P_CLIENT) |
+               BIT(NL80211_IFTYPE_P2P_GO) |
+               BIT(NL80211_IFTYPE_ADHOC);
+
        return phy;
 }
 EXPORT_SYMBOL_GPL(mt76_alloc_phy);
@@ -428,6 +444,17 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
        mutex_init(&dev->mcu.mutex);
        dev->tx_worker.fn = mt76_tx_worker;
 
+       hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+       hw->wiphy->interface_modes =
+               BIT(NL80211_IFTYPE_STATION) |
+               BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+               BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+               BIT(NL80211_IFTYPE_P2P_CLIENT) |
+               BIT(NL80211_IFTYPE_P2P_GO) |
+               BIT(NL80211_IFTYPE_ADHOC);
+
        spin_lock_init(&dev->token_lock);
        idr_init(&dev->token);
 
@@ -632,20 +659,19 @@ void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
 }
 EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
 
-void mt76_update_survey(struct mt76_dev *dev)
+void mt76_update_survey(struct mt76_phy *phy)
 {
+       struct mt76_dev *dev = phy->dev;
        ktime_t cur_time;
 
        if (dev->drv->update_survey)
-               dev->drv->update_survey(dev);
+               dev->drv->update_survey(phy);
 
        cur_time = ktime_get_boottime();
-       mt76_update_survey_active_time(&dev->phy, cur_time);
-       if (dev->phy2)
-               mt76_update_survey_active_time(dev->phy2, cur_time);
+       mt76_update_survey_active_time(phy, cur_time);
 
        if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
-               struct mt76_channel_state *state = dev->phy.chan_state;
+               struct mt76_channel_state *state = phy->chan_state;
 
                spin_lock_bh(&dev->cc_lock);
                state->cc_bss_rx += dev->cur_cc_bss_rx;
@@ -664,7 +690,7 @@ void mt76_set_channel(struct mt76_phy *phy)
        int timeout = HZ / 5;
 
        wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
-       mt76_update_survey(dev);
+       mt76_update_survey(phy);
 
        phy->chandef = *chandef;
        phy->chan_state = mt76_channel_state(phy, chandef->chan);
@@ -689,7 +715,7 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
 
        mutex_lock(&dev->mutex);
        if (idx == 0 && dev->drv->update_survey)
-               mt76_update_survey(dev);
+               mt76_update_survey(phy);
 
        sband = &phy->sband_2g;
        if (idx >= sband->sband.n_channels) {
index 36ede65..25c5cee 100644 (file)
@@ -87,6 +87,22 @@ enum mt76_rxq_id {
        __MT_RXQ_MAX
 };
 
+enum mt76_cipher_type {
+       MT_CIPHER_NONE,
+       MT_CIPHER_WEP40,
+       MT_CIPHER_TKIP,
+       MT_CIPHER_TKIP_NO_MIC,
+       MT_CIPHER_AES_CCMP,
+       MT_CIPHER_WEP104,
+       MT_CIPHER_BIP_CMAC_128,
+       MT_CIPHER_WEP128,
+       MT_CIPHER_WAPI,
+       MT_CIPHER_CCMP_CCX,
+       MT_CIPHER_CCMP_256,
+       MT_CIPHER_GCMP,
+       MT_CIPHER_GCMP_256,
+};
+
 struct mt76_queue_buf {
        dma_addr_t addr;
        u16 len;
@@ -320,6 +336,7 @@ enum {
 struct mt76_hw_cap {
        bool has_2ghz;
        bool has_5ghz;
+       bool has_6ghz;
 };
 
 #define MT_DRV_TXWI_NO_FREE            BIT(0)
@@ -336,7 +353,7 @@ struct mt76_driver_ops {
        u16 token_size;
        u8 mcs_rates;
 
-       void (*update_survey)(struct mt76_dev *dev);
+       void (*update_survey)(struct mt76_phy *phy);
 
        int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
                              enum mt76_txq_id qid, struct mt76_wcid *wcid,
@@ -738,6 +755,21 @@ enum mt76_phy_type {
        MT_PHY_TYPE_HE_MU,
 };
 
+#define CCK_RATE(_idx, _rate) {                                        \
+       .bitrate = _rate,                                       \
+       .flags = IEEE80211_RATE_SHORT_PREAMBLE,                 \
+       .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),            \
+       .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx),  \
+}
+
+#define OFDM_RATE(_idx, _rate) {                               \
+       .bitrate = _rate,                                       \
+       .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),           \
+       .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),     \
+}
+
+extern struct ieee80211_rate mt76_rates[12];
+
 #define __mt76_rr(dev, ...)    (dev)->bus->rr((dev), __VA_ARGS__)
 #define __mt76_wr(dev, ...)    (dev)->bus->wr((dev), __VA_ARGS__)
 #define __mt76_rmw(dev, ...)   (dev)->bus->rmw((dev), __VA_ARGS__)
@@ -1031,7 +1063,7 @@ void mt76_release_buffered_frames(struct ieee80211_hw *hw,
                                  bool more_data);
 bool mt76_has_tx_pending(struct mt76_phy *phy);
 void mt76_set_channel(struct mt76_phy *phy);
-void mt76_update_survey(struct mt76_dev *dev);
+void mt76_update_survey(struct mt76_phy *phy);
 void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
 int mt76_get_survey(struct ieee80211_hw *hw, int idx,
                    struct survey_info *survey);
@@ -1056,7 +1088,14 @@ struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
                                       struct sk_buff_head *list);
 void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
                             struct sk_buff_head *list);
-void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb);
+void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb,
+                           struct list_head *free_list);
+static inline void
+mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb)
+{
+       __mt76_tx_complete_skb(dev, wcid, skb, NULL);
+}
+
 void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
                          bool flush);
 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -1253,4 +1292,15 @@ mt76_token_put(struct mt76_dev *dev, int token)
 
        return txwi;
 }
+
+static inline int
+mt76_get_next_pkt_id(struct mt76_wcid *wcid)
+{
+       wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
+       if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
+           wcid->packet_id == MT_PACKET_ID_NO_SKB)
+               wcid->packet_id = MT_PACKET_ID_FIRST;
+
+       return wcid->packet_id;
+}
 #endif
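mt76_get_next_pkt_id() above wraps the per-station packet id inside MT_PACKET_ID_MASK while skipping the two reserved ids used to mark "no ack requested" and "no skb attached" in TX status reports. A standalone sketch; the mask and reserved values are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define PKT_ID_MASK     0x7f            /* assumed MT_PACKET_ID_MASK */
#define PKT_ID_NO_ACK   0
#define PKT_ID_NO_SKB   1
#define PKT_ID_FIRST    2

static uint8_t next_pkt_id(uint8_t *pkt_id)
{
        *pkt_id = (*pkt_id + 1) & PKT_ID_MASK;
        if (*pkt_id == PKT_ID_NO_ACK || *pkt_id == PKT_ID_NO_SKB)
                *pkt_id = PKT_ID_FIRST;
        return *pkt_id;
}

int main(void)
{
        uint8_t id = PKT_ID_MASK;       /* force a wraparound */

        printf("%u %u %u\n", next_pkt_id(&id), next_pkt_id(&id),
               next_pkt_id(&id));       /* 2 3 4: reserved ids skipped */
        return 0;
}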
index e1b2cfa..031d39a 100644 (file)
@@ -304,34 +304,6 @@ mt7603_init_hardware(struct mt7603_dev *dev)
        return 0;
 }
 
-#define CCK_RATE(_idx, _rate) {                                        \
-       .bitrate = _rate,                                       \
-       .flags = IEEE80211_RATE_SHORT_PREAMBLE,                 \
-       .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),            \
-       .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx),  \
-}
-
-#define OFDM_RATE(_idx, _rate) {                               \
-       .bitrate = _rate,                                       \
-       .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),           \
-       .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),     \
-}
-
-static struct ieee80211_rate mt7603_rates[] = {
-       CCK_RATE(0, 10),
-       CCK_RATE(1, 20),
-       CCK_RATE(2, 55),
-       CCK_RATE(3, 110),
-       OFDM_RATE(11, 60),
-       OFDM_RATE(15, 90),
-       OFDM_RATE(10, 120),
-       OFDM_RATE(14, 180),
-       OFDM_RATE(9,  240),
-       OFDM_RATE(13, 360),
-       OFDM_RATE(8,  480),
-       OFDM_RATE(12, 540),
-};
-
 static const struct ieee80211_iface_limit if_limits[] = {
        {
                .max = 1,
@@ -569,8 +541,8 @@ int mt7603_register_device(struct mt7603_dev *dev)
 
        wiphy->reg_notifier = mt7603_regd_notifier;
 
-       ret = mt76_register_device(&dev->mt76, true, mt7603_rates,
-                                  ARRAY_SIZE(mt7603_rates));
+       ret = mt76_register_device(&dev->mt76, true, mt76_rates,
+                                  ARRAY_SIZE(mt76_rates));
        if (ret)
                return ret;
 
index fbceb07..3972c56 100644 (file)
@@ -550,14 +550,27 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
                u8 *data = (u8 *)rxd;
 
                if (status->flag & RX_FLAG_DECRYPTED) {
-                       status->iv[0] = data[5];
-                       status->iv[1] = data[4];
-                       status->iv[2] = data[3];
-                       status->iv[3] = data[2];
-                       status->iv[4] = data[1];
-                       status->iv[5] = data[0];
-
-                       insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+                       switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
+                       case MT_CIPHER_AES_CCMP:
+                       case MT_CIPHER_CCMP_CCX:
+                       case MT_CIPHER_CCMP_256:
+                               insert_ccmp_hdr =
+                                       FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+                               fallthrough;
+                       case MT_CIPHER_TKIP:
+                       case MT_CIPHER_TKIP_NO_MIC:
+                       case MT_CIPHER_GCMP:
+                       case MT_CIPHER_GCMP_256:
+                               status->iv[0] = data[5];
+                               status->iv[1] = data[4];
+                               status->iv[2] = data[3];
+                               status->iv[3] = data[2];
+                               status->iv[4] = data[1];
+                               status->iv[5] = data[0];
+                               break;
+                       default:
+                               break;
+                       }
                }
 
                rxd += 4;
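The cipher switch added above copies an IV only for ciphers that actually carry a PN/TSC in the RX descriptor, and still reinstates a CCMP header for fragmented CCMP frames. The unrolled byte reversal it performs, as a compact loop:

#include <stdint.h>
#include <stdio.h>

/* The descriptor stores the 6 PN/TSC bytes in reverse order; the driver
 * unrolls this copy, but it is just a byte reversal. */
static void copy_iv(uint8_t iv[6], const uint8_t *rxd_data)
{
        int i;

        for (i = 0; i < 6; i++)
                iv[i] = rxd_data[5 - i];
}

int main(void)
{
        const uint8_t data[6] = { 0x10, 0x32, 0x54, 0x76, 0x98, 0xba };
        uint8_t iv[6];
        int i;

        copy_iv(iv, data);
        for (i = 0; i < 6; i++)
                printf("%02x%s", iv[i], i == 5 ? "\n" : " ");
        return 0;
}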
@@ -831,7 +844,7 @@ void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
        sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
 }
 
-static enum mt7603_cipher_type
+static enum mt76_cipher_type
 mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
 {
        memset(key_data, 0, 32);
@@ -863,7 +876,7 @@ mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
 int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
                        struct ieee80211_key_conf *key)
 {
-       enum mt7603_cipher_type cipher;
+       enum mt76_cipher_type cipher;
        u32 addr = mt7603_wtbl3_addr(wcid);
        u8 key_data[32];
        int key_len = sizeof(key_data);
@@ -1213,7 +1226,7 @@ mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
                if (!mt7603_fill_txs(dev, sta, info, txs_data)) {
-                       ieee80211_tx_info_clear_status(info);
+                       info->status.rates[0].count = 0;
                        info->status.rates[0].idx = -1;
                }
 
@@ -1584,12 +1597,12 @@ trigger:
        return true;
 }
 
-void mt7603_update_channel(struct mt76_dev *mdev)
+void mt7603_update_channel(struct mt76_phy *mphy)
 {
-       struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
+       struct mt7603_dev *dev = container_of(mphy->dev, struct mt7603_dev, mt76);
        struct mt76_channel_state *state;
 
-       state = mdev->phy.chan_state;
+       state = mphy->chan_state;
        state->cc_busy += mt76_rr(dev, MT_MIB_STAT_CCA);
 }
 
@@ -1806,7 +1819,7 @@ void mt7603_mac_work(struct work_struct *work)
        mutex_lock(&dev->mt76.mutex);
 
        dev->mphy.mac_work_count++;
-       mt76_update_survey(&dev->mt76);
+       mt76_update_survey(&dev->mphy);
        mt7603_edcca_check(dev);
 
        for (i = 0, idx = 0; i < 2; i++) {
index 1df5b9f..0fd46d9 100644 (file)
@@ -256,7 +256,7 @@ void mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
 
 void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t);
 
-void mt7603_update_channel(struct mt76_dev *mdev);
+void mt7603_update_channel(struct mt76_phy *mphy);
 
 void mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val);
 void mt7603_cca_stats_reset(struct mt7603_dev *dev);
index 6741e69..3b90109 100644 (file)
@@ -765,16 +765,4 @@ enum {
 #define MT_WTBL1_OR                    (MT_WTBL1_BASE + 0x2300)
 #define MT_WTBL1_OR_PSM_WRITE          BIT(31)
 
-enum mt7603_cipher_type {
-       MT_CIPHER_NONE,
-       MT_CIPHER_WEP40,
-       MT_CIPHER_TKIP,
-       MT_CIPHER_TKIP_NO_MIC,
-       MT_CIPHER_AES_CCMP,
-       MT_CIPHER_WEP104,
-       MT_CIPHER_BIP_CMAC_128,
-       MT_CIPHER_WEP128,
-       MT_CIPHER_WAPI,
-};
-
 #endif
index e8fc4a7..83f9861 100644 (file)
@@ -1,4 +1,4 @@
-#SPDX-License-Identifier: ISC
+# SPDX-License-Identifier: ISC
 
 obj-$(CONFIG_MT7615_COMMON) += mt7615-common.o
 obj-$(CONFIG_MT7615E) += mt7615e.o
index 676bb22..cb46597 100644 (file)
@@ -75,7 +75,7 @@ mt7615_pm_set(void *data, u64 val)
        if (!mt7615_wait_for_mcu_init(dev))
                return 0;
 
-       if (!mt7615_firmware_offload(dev) || !mt76_is_mmio(&dev->mt76))
+       if (!mt7615_firmware_offload(dev) || mt76_is_usb(&dev->mt76))
                return -EOPNOTSUPP;
 
        if (val == pm->enable)
@@ -319,24 +319,6 @@ mt7615_radio_read(struct seq_file *s, void *data)
        return 0;
 }
 
-static int mt7615_read_temperature(struct seq_file *s, void *data)
-{
-       struct mt7615_dev *dev = dev_get_drvdata(s->private);
-       int temp;
-
-       if (!mt7615_wait_for_mcu_init(dev))
-               return 0;
-
-       /* cpu */
-       mt7615_mutex_acquire(dev);
-       temp = mt7615_mcu_get_temperature(dev, 0);
-       mt7615_mutex_release(dev);
-
-       seq_printf(s, "Temperature: %d\n", temp);
-
-       return 0;
-}
-
 static int
 mt7615_queues_acq(struct seq_file *s, void *data)
 {
@@ -566,8 +548,6 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
 
        debugfs_create_file("reset_test", 0200, dir, dev,
                            &fops_reset_test);
-       debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir,
-                                   mt7615_read_temperature);
        debugfs_create_file("ext_mac_addr", 0600, dir, dev, &fops_ext_mac_addr);
 
        debugfs_create_u32("rf_wfidx", 0600, dir, &dev->debugfs_rf_wf);
index 8004ae5..00aefea 100644 (file)
@@ -81,7 +81,7 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget)
        if (napi_complete(napi))
                mt7615_irq_enable(dev, mt7615_tx_mcu_int_mask(dev));
 
-       mt76_connac_pm_unref(&dev->pm);
+       mt76_connac_pm_unref(&dev->mphy, &dev->pm);
 
        return 0;
 }
@@ -99,7 +99,7 @@ static int mt7615_poll_rx(struct napi_struct *napi, int budget)
                return 0;
        }
        done = mt76_dma_rx_poll(napi, budget);
-       mt76_connac_pm_unref(&dev->pm);
+       mt76_connac_pm_unref(&dev->mphy, &dev->pm);
 
        return done;
 }
@@ -222,14 +222,9 @@ void mt7615_dma_start(struct mt7615_dev *dev)
 int mt7615_dma_init(struct mt7615_dev *dev)
 {
        int rx_ring_size = MT7615_RX_RING_SIZE;
-       int rx_buf_size = MT_RX_BUF_SIZE;
        u32 mask;
        int ret;
 
-       /* Increase buffer size to receive large VHT MPDUs */
-       if (dev->mphy.cap.has_5ghz)
-               rx_buf_size *= 2;
-
        mt76_dma_attach(&dev->mt76);
 
        mt76_wr(dev, MT_WPDMA_GLO_CFG,
@@ -270,7 +265,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
 
        /* init rx queues */
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
-                              MT7615_RX_MCU_RING_SIZE, rx_buf_size,
+                              MT7615_RX_MCU_RING_SIZE, MT_RX_BUF_SIZE,
                               MT_RX_RING_BASE);
        if (ret)
                return ret;
@@ -279,7 +274,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
            rx_ring_size /= 2;
 
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
-                              rx_ring_size, rx_buf_size, MT_RX_RING_BASE);
+                              rx_ring_size, MT_RX_BUF_SIZE, MT_RX_RING_BASE);
        if (ret)
                return ret;
 
index d20f05a..2f1ac64 100644 (file)
@@ -8,11 +8,61 @@
  */
 
 #include <linux/etherdevice.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
 #include "mt7615.h"
 #include "mac.h"
 #include "mcu.h"
 #include "eeprom.h"
 
+static ssize_t mt7615_thermal_show_temp(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       struct mt7615_dev *mdev = dev_get_drvdata(dev);
+       int temperature;
+
+       if (!mt7615_wait_for_mcu_init(mdev))
+               return 0;
+
+       mt7615_mutex_acquire(mdev);
+       temperature = mt7615_mcu_get_temperature(mdev);
+       mt7615_mutex_release(mdev);
+
+       if (temperature < 0)
+               return temperature;
+
+       /* display in millidegrees Celsius */
+       return sprintf(buf, "%u\n", temperature * 1000);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, mt7615_thermal_show_temp,
+                         NULL, 0);
+
+static struct attribute *mt7615_hwmon_attrs[] = {
+       &sensor_dev_attr_temp1_input.dev_attr.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(mt7615_hwmon);
+
+int mt7615_thermal_init(struct mt7615_dev *dev)
+{
+       struct wiphy *wiphy = mt76_hw(dev)->wiphy;
+       struct device *hwmon;
+
+       if (!IS_REACHABLE(CONFIG_HWMON))
+               return 0;
+
+       hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev,
+                                                      wiphy_name(wiphy), dev,
+                                                      mt7615_hwmon_groups);
+       if (IS_ERR(hwmon))
+               return PTR_ERR(hwmon);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt7615_thermal_init);
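With the debugfs "temperature" file replaced by a standard hwmon sensor (see the debugfs hunk earlier), userspace reads millidegrees from sysfs like any other hwmon chip. A minimal reader; the hwmon index is a placeholder:

#include <stdio.h>

int main(void)
{
        /* hwmon0 is a placeholder; match the device by its name attribute */
        FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
        long millideg;

        if (!f) {
                perror("temp1_input");
                return 1;
        }
        if (fscanf(f, "%ld", &millideg) != 1) {
                fclose(f);
                return 1;
        }
        fclose(f);
        printf("%.1f degC\n", millideg / 1000.0);
        return 0;
}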
+
 static void
 mt7615_phy_init(struct mt7615_dev *dev)
 {
@@ -174,35 +224,6 @@ bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev)
 }
 EXPORT_SYMBOL_GPL(mt7615_wait_for_mcu_init);
 
-#define CCK_RATE(_idx, _rate) {                                                \
-       .bitrate = _rate,                                               \
-       .flags = IEEE80211_RATE_SHORT_PREAMBLE,                         \
-       .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),                    \
-       .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)),        \
-}
-
-#define OFDM_RATE(_idx, _rate) {                                       \
-       .bitrate = _rate,                                               \
-       .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),                   \
-       .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),             \
-}
-
-struct ieee80211_rate mt7615_rates[] = {
-       CCK_RATE(0, 10),
-       CCK_RATE(1, 20),
-       CCK_RATE(2, 55),
-       CCK_RATE(3, 110),
-       OFDM_RATE(11, 60),
-       OFDM_RATE(15, 90),
-       OFDM_RATE(10, 120),
-       OFDM_RATE(14, 180),
-       OFDM_RATE(9,  240),
-       OFDM_RATE(13, 360),
-       OFDM_RATE(8,  480),
-       OFDM_RATE(12, 540),
-};
-EXPORT_SYMBOL_GPL(mt7615_rates);
-
 static const struct ieee80211_iface_limit if_limits[] = {
        {
                .max = 1,
@@ -362,7 +383,7 @@ mt7615_init_wiphy(struct ieee80211_hw *hw)
        wiphy->reg_notifier = mt7615_regd_notifier;
 
        wiphy->max_sched_scan_plan_interval =
-               MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL;
+               MT76_CONNAC_MAX_TIME_SCHED_SCAN_INTERVAL;
        wiphy->max_sched_scan_ie_len = IEEE80211_MAX_DATA_LEN;
        wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN;
        wiphy->max_sched_scan_ssids = MT76_CONNAC_MAX_SCHED_SCAN_SSID;
@@ -472,8 +493,8 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
        for (i = 0; i <= MT_TXQ_PSD ; i++)
                mphy->q_tx[i] = dev->mphy.q_tx[i];
 
-       ret = mt76_register_phy(mphy, true, mt7615_rates,
-                               ARRAY_SIZE(mt7615_rates));
+       ret = mt76_register_phy(mphy, true, mt76_rates,
+                               ARRAY_SIZE(mt76_rates));
        if (ret)
                ieee80211_free_hw(mphy->hw);
 
index e2dcfee..ff3f85e 100644 (file)
@@ -20,7 +20,7 @@
 #define to_rssi(field, rxv)            ((FIELD_GET(field, rxv) - 220) / 2)
 
 static const struct mt7615_dfs_radar_spec etsi_radar_specs = {
-       .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+       .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
        .radar_pattern = {
                [5] =  { 1, 0,  6, 32, 28, 0, 17,  990, 5010, 1, 1 },
                [6] =  { 1, 0,  9, 32, 28, 0, 27,  615, 5010, 1, 1 },
@@ -34,7 +34,7 @@ static const struct mt7615_dfs_radar_spec etsi_radar_specs = {
 };
 
 static const struct mt7615_dfs_radar_spec fcc_radar_specs = {
-       .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+       .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
        .radar_pattern = {
                [0] = { 1, 0,  9,  32, 28, 0, 13, 508, 3076, 1,  1 },
                [1] = { 1, 0, 12,  32, 28, 0, 17, 140,  240, 1,  1 },
@@ -45,7 +45,7 @@ static const struct mt7615_dfs_radar_spec fcc_radar_specs = {
 };
 
 static const struct mt7615_dfs_radar_spec jp_radar_specs = {
-       .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+       .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
        .radar_pattern = {
                [0] =  { 1, 0,  8, 32, 28, 0, 13,  508, 3076, 1,  1 },
                [1] =  { 1, 0, 12, 32, 28, 0, 17,  140,  240, 1,  1 },
@@ -57,6 +57,33 @@ static const struct mt7615_dfs_radar_spec jp_radar_specs = {
        },
 };
 
+static enum mt76_cipher_type
+mt7615_mac_get_cipher(int cipher)
+{
+       switch (cipher) {
+       case WLAN_CIPHER_SUITE_WEP40:
+               return MT_CIPHER_WEP40;
+       case WLAN_CIPHER_SUITE_WEP104:
+               return MT_CIPHER_WEP104;
+       case WLAN_CIPHER_SUITE_TKIP:
+               return MT_CIPHER_TKIP;
+       case WLAN_CIPHER_SUITE_AES_CMAC:
+               return MT_CIPHER_BIP_CMAC_128;
+       case WLAN_CIPHER_SUITE_CCMP:
+               return MT_CIPHER_AES_CCMP;
+       case WLAN_CIPHER_SUITE_CCMP_256:
+               return MT_CIPHER_CCMP_256;
+       case WLAN_CIPHER_SUITE_GCMP:
+               return MT_CIPHER_GCMP;
+       case WLAN_CIPHER_SUITE_GCMP_256:
+               return MT_CIPHER_GCMP_256;
+       case WLAN_CIPHER_SUITE_SMS4:
+               return MT_CIPHER_WAPI;
+       default:
+               return MT_CIPHER_NONE;
+       }
+}
+
 static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
                                            u8 idx, bool unicast)
 {
@@ -313,14 +340,27 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
                u8 *data = (u8 *)rxd;
 
                if (status->flag & RX_FLAG_DECRYPTED) {
-                       status->iv[0] = data[5];
-                       status->iv[1] = data[4];
-                       status->iv[2] = data[3];
-                       status->iv[3] = data[2];
-                       status->iv[4] = data[1];
-                       status->iv[5] = data[0];
-
-                       insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+                       switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
+                       case MT_CIPHER_AES_CCMP:
+                       case MT_CIPHER_CCMP_CCX:
+                       case MT_CIPHER_CCMP_256:
+                               insert_ccmp_hdr =
+                                       FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+                               fallthrough;
+                       case MT_CIPHER_TKIP:
+                       case MT_CIPHER_TKIP_NO_MIC:
+                       case MT_CIPHER_GCMP:
+                       case MT_CIPHER_GCMP_256:
+                               status->iv[0] = data[5];
+                               status->iv[1] = data[4];
+                               status->iv[2] = data[3];
+                               status->iv[3] = data[2];
+                               status->iv[4] = data[1];
+                               status->iv[5] = data[0];
+                               break;
+                       default:
+                               break;
+                       }
                }
                rxd += 4;
                if ((u8 *)rxd - skb->data >= skb->len)
@@ -1062,7 +1102,7 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
        idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
        addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
 
-       mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
+       mt76_rmw(dev, addr, MT_LPON_TCR_MODE, MT_LPON_TCR_READ); /* TSF read */
        sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0);
        sta->rate_set_tsf |= rd.rateset;
 
@@ -1078,7 +1118,7 @@ EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
 static int
 mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
                           struct ieee80211_key_conf *key,
-                          enum mt7615_cipher_type cipher, u16 cipher_mask,
+                          enum mt76_cipher_type cipher, u16 cipher_mask,
                           enum set_key_cmd cmd)
 {
        u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
@@ -1118,7 +1158,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
 
 static int
 mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
-                         enum mt7615_cipher_type cipher, u16 cipher_mask,
+                         enum mt76_cipher_type cipher, u16 cipher_mask,
                          int keyidx, enum set_key_cmd cmd)
 {
        u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
@@ -1157,7 +1197,7 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
 
 static void
 mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
-                             enum mt7615_cipher_type cipher, u16 cipher_mask,
+                             enum mt76_cipher_type cipher, u16 cipher_mask,
                              enum set_key_cmd cmd)
 {
        u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
@@ -1183,7 +1223,7 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
                              struct ieee80211_key_conf *key,
                              enum set_key_cmd cmd)
 {
-       enum mt7615_cipher_type cipher;
+       enum mt76_cipher_type cipher;
        u16 cipher_mask = wcid->cipher;
        int err;
 
@@ -1235,22 +1275,20 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
        int first_idx = 0, last_idx;
        int i, idx, count;
        bool fixed_rate, ack_timeout;
-       bool probe, ampdu, cck = false;
+       bool ampdu, cck = false;
        bool rs_idx;
        u32 rate_set_tsf;
        u32 final_rate, final_rate_flags, final_nss, txs;
 
-       fixed_rate = info->status.rates[0].count;
-       probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
-
        txs = le32_to_cpu(txs_data[1]);
-       ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);
+       ampdu = txs & MT_TXS1_AMPDU;
 
        txs = le32_to_cpu(txs_data[3]);
        count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
        last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs);
 
        txs = le32_to_cpu(txs_data[0]);
+       fixed_rate = txs & MT_TXS0_FIXED_RATE;
        final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
        ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;
 
@@ -1272,7 +1310,7 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
 
        first_idx = max_t(int, 0, last_idx - (count - 1) / MT7615_RATE_RETRY);
 
-       if (fixed_rate && !probe) {
+       if (fixed_rate) {
                info->status.rates[0].count = count;
                i = 0;
                goto out;
@@ -1391,7 +1429,7 @@ static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
                if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
-                       ieee80211_tx_info_clear_status(info);
+                       info->status.rates[0].count = 0;
                        info->status.rates[0].idx = -1;
                }
 
@@ -1821,43 +1859,41 @@ mt7615_phy_update_channel(struct mt76_phy *mphy, int idx)
        state->noise = -(phy->noise >> 4);
 }
 
-static void __mt7615_update_channel(struct mt7615_dev *dev)
+static void mt7615_update_survey(struct mt7615_dev *dev)
 {
        struct mt76_dev *mdev = &dev->mt76;
+       ktime_t cur_time;
+
+       /* MT7615 can only update both phys simultaneously
+        * since some registers are shared across bands.
+        */
 
        mt7615_phy_update_channel(&mdev->phy, 0);
        if (mdev->phy2)
                mt7615_phy_update_channel(mdev->phy2, 1);
 
+       cur_time = ktime_get_boottime();
+
+       mt76_update_survey_active_time(&mdev->phy, cur_time);
+       if (mdev->phy2)
+               mt76_update_survey_active_time(mdev->phy2, cur_time);
+
        /* reset obss airtime */
        mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
 }
 
-void mt7615_update_channel(struct mt76_dev *mdev)
+void mt7615_update_channel(struct mt76_phy *mphy)
 {
-       struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+       struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76);
 
        if (mt76_connac_pm_wake(&dev->mphy, &dev->pm))
                return;
 
-       __mt7615_update_channel(dev);
+       mt7615_update_survey(dev);
        mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
 }
 EXPORT_SYMBOL_GPL(mt7615_update_channel);
 
-static void mt7615_update_survey(struct mt7615_dev *dev)
-{
-       struct mt76_dev *mdev = &dev->mt76;
-       ktime_t cur_time;
-
-       __mt7615_update_channel(dev);
-       cur_time = ktime_get_boottime();
-
-       mt76_update_survey_active_time(&mdev->phy, cur_time);
-       if (mdev->phy2)
-               mt76_update_survey_active_time(mdev->phy2, cur_time);
-}
-
 static void
 mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
 {
@@ -1906,15 +1942,26 @@ void mt7615_pm_wake_work(struct work_struct *work)
        mphy = dev->phy.mt76;
 
        if (!mt7615_mcu_set_drv_ctrl(dev)) {
+               struct mt76_dev *mdev = &dev->mt76;
                int i;
 
-               mt76_for_each_q_rx(&dev->mt76, i)
-                       napi_schedule(&dev->mt76.napi[i]);
-               mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
-               mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
-               if (test_bit(MT76_STATE_RUNNING, &mphy->state))
+               if (mt76_is_sdio(mdev)) {
+                       mt76_worker_schedule(&mdev->sdio.txrx_worker);
+               } else {
+                       mt76_for_each_q_rx(mdev, i)
+                               napi_schedule(&mdev->napi[i]);
+                       mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
+                       mt76_queue_tx_cleanup(dev, mdev->q_mcu[MT_MCUQ_WM],
+                                             false);
+               }
+
+               if (test_bit(MT76_STATE_RUNNING, &mphy->state)) {
+                       unsigned long timeout;
+
+                       timeout = mt7615_get_macwork_timeout(dev);
                        ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
-                                                    MT7615_WATCHDOG_TIME);
+                                                    timeout);
+               }
        }
 
        ieee80211_wake_queues(mphy->hw);
@@ -1949,6 +1996,7 @@ void mt7615_mac_work(struct work_struct *work)
 {
        struct mt7615_phy *phy;
        struct mt76_phy *mphy;
+       unsigned long timeout;
 
        mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
                                               mac_work.work);
@@ -1967,8 +2015,9 @@ void mt7615_mac_work(struct work_struct *work)
        mt7615_mutex_release(phy->dev);
 
        mt76_tx_status_check(mphy->dev, NULL, false);
-       ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
-                                    MT7615_WATCHDOG_TIME);
+
+       timeout = mt7615_get_macwork_timeout(phy->dev);
+       ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, timeout);
 }
 
 void mt7615_tx_token_put(struct mt7615_dev *dev)
@@ -2049,14 +2098,12 @@ mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
 {
        const struct mt7615_dfs_radar_spec *radar_specs;
        struct mt7615_dev *dev = phy->dev;
-       int err, i;
+       int err, i, lpn = 500;
 
        switch (dev->mt76.region) {
        case NL80211_DFS_FCC:
                radar_specs = &fcc_radar_specs;
-               err = mt7615_mcu_set_fcc5_lpn(dev, 8);
-               if (err < 0)
-                       return err;
+               lpn = 8;
                break;
        case NL80211_DFS_ETSI:
                radar_specs = &etsi_radar_specs;
@@ -2068,6 +2115,11 @@ mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
                return -EINVAL;
        }
 
+       /* avoid FCC radar detection in non-FCC region */
+       err = mt7615_mcu_set_fcc5_lpn(dev, lpn);
+       if (err < 0)
+               return err;
+
        for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
                err = mt7615_mcu_set_radar_th(dev, i,
                                              &radar_specs->radar_pattern[i]);
index 6bf9da0..46f283e 100644 (file)
@@ -383,48 +383,6 @@ struct mt7615_dfs_radar_spec {
        struct mt7615_dfs_pattern radar_pattern[16];
 };
 
-enum mt7615_cipher_type {
-       MT_CIPHER_NONE,
-       MT_CIPHER_WEP40,
-       MT_CIPHER_TKIP,
-       MT_CIPHER_TKIP_NO_MIC,
-       MT_CIPHER_AES_CCMP,
-       MT_CIPHER_WEP104,
-       MT_CIPHER_BIP_CMAC_128,
-       MT_CIPHER_WEP128,
-       MT_CIPHER_WAPI,
-       MT_CIPHER_CCMP_256 = 10,
-       MT_CIPHER_GCMP,
-       MT_CIPHER_GCMP_256,
-};
-
-static inline enum mt7615_cipher_type
-mt7615_mac_get_cipher(int cipher)
-{
-       switch (cipher) {
-       case WLAN_CIPHER_SUITE_WEP40:
-               return MT_CIPHER_WEP40;
-       case WLAN_CIPHER_SUITE_WEP104:
-               return MT_CIPHER_WEP104;
-       case WLAN_CIPHER_SUITE_TKIP:
-               return MT_CIPHER_TKIP;
-       case WLAN_CIPHER_SUITE_AES_CMAC:
-               return MT_CIPHER_BIP_CMAC_128;
-       case WLAN_CIPHER_SUITE_CCMP:
-               return MT_CIPHER_AES_CCMP;
-       case WLAN_CIPHER_SUITE_CCMP_256:
-               return MT_CIPHER_CCMP_256;
-       case WLAN_CIPHER_SUITE_GCMP:
-               return MT_CIPHER_GCMP;
-       case WLAN_CIPHER_SUITE_GCMP_256:
-               return MT_CIPHER_GCMP_256;
-       case WLAN_CIPHER_SUITE_SMS4:
-               return MT_CIPHER_WAPI;
-       default:
-               return MT_CIPHER_NONE;
-       }
-}
-
 static inline struct mt7615_txp_common *
 mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
 {
index 39733b3..dada43d 100644 (file)
@@ -28,6 +28,7 @@ static int mt7615_start(struct ieee80211_hw *hw)
 {
        struct mt7615_dev *dev = mt7615_hw_dev(hw);
        struct mt7615_phy *phy = mt7615_hw_phy(hw);
+       unsigned long timeout;
        bool running;
        int ret;
 
@@ -78,8 +79,8 @@ static int mt7615_start(struct ieee80211_hw *hw)
 
        set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
 
-       ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
-                                    MT7615_WATCHDOG_TIME);
+       timeout = mt7615_get_macwork_timeout(dev);
+       ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, timeout);
 
        if (!running)
                mt7615_mac_reset_counters(dev);
@@ -240,8 +241,6 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
        }
 
        ret = mt7615_mcu_add_dev_info(phy, vif, true);
-       if (ret)
-               goto out;
 out:
        mt7615_mutex_release(dev);
 
@@ -352,10 +351,12 @@ out:
        mt7615_mutex_release(dev);
 
        mt76_worker_schedule(&dev->mt76.tx_worker);
-       if (!mt76_testmode_enabled(phy->mt76))
+       if (!mt76_testmode_enabled(phy->mt76)) {
+               unsigned long timeout = mt7615_get_macwork_timeout(dev);
+
                ieee80211_queue_delayed_work(phy->mt76->hw,
-                                            &phy->mt76->mac_work,
-                                            MT7615_WATCHDOG_TIME);
+                                            &phy->mt76->mac_work, timeout);
+       }
 
        return ret;
 }
@@ -695,7 +696,7 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
        msta->n_rates = i;
        if (mt76_connac_pm_ref(phy->mt76, &dev->pm)) {
                mt7615_mac_set_rates(phy, msta, NULL, msta->rates);
-               mt76_connac_pm_unref(&dev->pm);
+               mt76_connac_pm_unref(phy->mt76, &dev->pm);
        }
        spin_unlock_bh(&dev->mt76.lock);
 }
@@ -711,7 +712,7 @@ void mt7615_tx_worker(struct mt76_worker *w)
        }
 
        mt76_tx_worker_run(&dev->mt76);
-       mt76_connac_pm_unref(&dev->pm);
+       mt76_connac_pm_unref(&dev->mphy, &dev->pm);
 }
 
 static void mt7615_tx(struct ieee80211_hw *hw,
@@ -741,7 +742,7 @@ static void mt7615_tx(struct ieee80211_hw *hw,
 
        if (mt76_connac_pm_ref(mphy, &dev->pm)) {
                mt76_tx(mphy, control->sta, wcid, skb);
-               mt76_connac_pm_unref(&dev->pm);
+               mt76_connac_pm_unref(mphy, &dev->pm);
                return;
        }
 
@@ -881,7 +882,8 @@ mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 
        mt7615_mutex_acquire(dev);
 
-       mt76_set(dev, reg, MT_LPON_TCR_MODE); /* TSF read */
+       /* TSF read */
+       mt76_rmw(dev, reg, MT_LPON_TCR_MODE, MT_LPON_TCR_READ);
        tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0);
        tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1);
 
@@ -911,7 +913,33 @@ mt7615_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]);
        mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]);
        /* TSF software overwrite */
-       mt76_set(dev, reg, MT_LPON_TCR_WRITE);
+       mt76_rmw(dev, reg, MT_LPON_TCR_MODE, MT_LPON_TCR_WRITE);
+
+       mt7615_mutex_release(dev);
+}
+
+static void
+mt7615_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                 s64 timestamp)
+{
+       struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+       struct mt7615_dev *dev = mt7615_hw_dev(hw);
+       union {
+               u64 t64;
+               u32 t32[2];
+       } tsf = { .t64 = timestamp, };
+       u16 idx = mvif->mt76.omac_idx;
+       u32 reg;
+
+       idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
+       reg = idx > 1 ? MT_LPON_TCR2(idx) : MT_LPON_TCR0(idx);
+
+       mt7615_mutex_acquire(dev);
+
+       mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]);
+       mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]);
+       /* TSF software adjust */
+       mt76_rmw(dev, reg, MT_LPON_TCR_MODE, MT_LPON_TCR_ADJUST);
 
        mt7615_mutex_release(dev);
 }
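
For reference, the reworked TSF handling treats MT_LPON_TCR_MODE as a two-bit
command field. A minimal sketch of the three operations, using only the helpers
and register bits introduced in this series:

	/* mt76_rmw(dev, reg, mask, val) clears the mask bits before setting
	 * val, so each command writes the whole 2-bit mode field instead of
	 * OR-ing bits in as the old mt76_set() did.
	 */
	mt76_rmw(dev, reg, MT_LPON_TCR_MODE, MT_LPON_TCR_READ);   /* latch TSF into MT_LPON_UTTR0/1 */
	mt76_rmw(dev, reg, MT_LPON_TCR_MODE, MT_LPON_TCR_WRITE);  /* overwrite TSF from MT_LPON_UTTR0/1 */
	mt76_rmw(dev, reg, MT_LPON_TCR_MODE, MT_LPON_TCR_ADJUST); /* apply MT_LPON_UTTR0/1 as a signed offset */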
@@ -1162,7 +1190,7 @@ static void mt7615_sta_set_decap_offload(struct ieee80211_hw *hw,
        else
                clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
 
-       mt7615_mcu_sta_update_hdr_trans(dev, vif, sta);
+       mt7615_mcu_set_sta_decap_offload(dev, vif, sta);
 }
 
 #ifdef CONFIG_PM
@@ -1200,6 +1228,7 @@ static int mt7615_resume(struct ieee80211_hw *hw)
 {
        struct mt7615_phy *phy = mt7615_hw_phy(hw);
        struct mt7615_dev *dev = mt7615_hw_dev(hw);
+       unsigned long timeout;
        bool running;
 
        mt7615_mutex_acquire(dev);
@@ -1223,8 +1252,8 @@ static int mt7615_resume(struct ieee80211_hw *hw)
                                            mt76_connac_mcu_set_suspend_iter,
                                            phy->mt76);
 
-       ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
-                                    MT7615_WATCHDOG_TIME);
+       timeout = mt7615_get_macwork_timeout(dev);
+       ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, timeout);
 
        mt7615_mutex_release(dev);
 
@@ -1278,6 +1307,7 @@ const struct ieee80211_ops mt7615_ops = {
        .get_stats = mt7615_get_stats,
        .get_tsf = mt7615_get_tsf,
        .set_tsf = mt7615_set_tsf,
+       .offset_tsf = mt7615_offset_tsf,
        .get_survey = mt76_get_survey,
        .get_antenna = mt76_get_antenna,
        .set_antenna = mt7615_set_antenna,
index aa42af9..f8a0969 100644 (file)
@@ -411,6 +411,9 @@ mt7615_mcu_rx_csa_notify(struct mt7615_dev *dev, struct sk_buff *skb)
 
        c = (struct mt7615_mcu_csa_notify *)skb->data;
 
+       if (c->omac_idx > EXT_BSSID_MAX)
+               return;
+
        if (ext_phy && ext_phy->omac_mask & BIT_ULL(c->omac_idx))
                mphy = dev->mt76.phy2;
 
@@ -427,6 +430,10 @@ mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb)
 
        r = (struct mt7615_mcu_rdd_report *)skb->data;
 
+       if (!dev->radar_pattern.n_pulses && !r->long_detected &&
+           !r->constant_prf_detected && !r->staggered_prf_detected)
+               return;
+
        if (r->band_idx && dev->mt76.phy2)
                mphy = dev->mt76.phy2;
 
@@ -1021,9 +1028,10 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
        if (IS_ERR(sskb))
                return PTR_ERR(sskb);
 
-       mt76_connac_mcu_sta_basic_tlv(sskb, vif, sta, enable);
+       mt76_connac_mcu_sta_basic_tlv(sskb, vif, sta, enable, true);
        if (enable && sta)
-               mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0);
+               mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0,
+                                       MT76_STA_INFO_STATE_ASSOC);
 
        wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
                                                  WTBL_RESET_AND_SET, NULL,
@@ -1037,8 +1045,8 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
                if (sta)
                        mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, wskb, sta,
                                                    NULL, wtbl_hdr);
-               mt76_connac_mcu_wtbl_hdr_trans_tlv(wskb, &msta->wcid, NULL,
-                                                  wtbl_hdr);
+               mt76_connac_mcu_wtbl_hdr_trans_tlv(wskb, vif, &msta->wcid,
+                                                  NULL, wtbl_hdr);
        }
 
        cmd = enable ? MCU_EXT_CMD_WTBL_UPDATE : MCU_EXT_CMD_STA_REC_UPDATE;
@@ -1058,6 +1066,26 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
        return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
 }
 
+static int
+mt7615_mcu_wtbl_update_hdr_trans(struct mt7615_dev *dev,
+                                struct ieee80211_vif *vif,
+                                struct ieee80211_sta *sta)
+{
+       struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+       struct wtbl_req_hdr *wtbl_hdr;
+       struct sk_buff *skb = NULL;
+
+       wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
+                                                 WTBL_SET, NULL, &skb);
+       if (IS_ERR(wtbl_hdr))
+               return PTR_ERR(wtbl_hdr);
+
+       mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, &msta->wcid, NULL,
+                                          wtbl_hdr);
+       return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE,
+                                    true);
+}
+
 static const struct mt7615_mcu_ops wtbl_update_ops = {
        .add_beacon_offload = mt7615_mcu_add_beacon_offload,
        .set_pm_state = mt7615_mcu_ctrl_pm_state,
@@ -1068,6 +1096,7 @@ static const struct mt7615_mcu_ops wtbl_update_ops = {
        .sta_add = mt7615_mcu_wtbl_sta_add,
        .set_drv_ctrl = mt7615_mcu_drv_pmctrl,
        .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
+       .set_sta_decap_offload = mt7615_mcu_wtbl_update_hdr_trans,
 };
 
 static int
@@ -1120,18 +1149,21 @@ mt7615_mcu_sta_rx_ba(struct mt7615_dev *dev,
 
 static int
 __mt7615_mcu_add_sta(struct mt76_phy *phy, struct ieee80211_vif *vif,
-                    struct ieee80211_sta *sta, bool enable, int cmd)
+                    struct ieee80211_sta *sta, bool enable, int cmd,
+                    bool offload_fw)
 {
        struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
        struct mt76_sta_cmd_info info = {
                .sta = sta,
                .vif = vif,
+               .offload_fw = offload_fw,
                .enable = enable,
+               .newly = true,
                .cmd = cmd,
        };
 
        info.wcid = sta ? (struct mt76_wcid *)sta->drv_priv : &mvif->sta.wcid;
-       return mt76_connac_mcu_add_sta_cmd(phy, &info);
+       return mt76_connac_mcu_sta_cmd(phy, &info);
 }
 
 static int
@@ -1139,7 +1171,19 @@ mt7615_mcu_add_sta(struct mt7615_phy *phy, struct ieee80211_vif *vif,
                   struct ieee80211_sta *sta, bool enable)
 {
        return __mt7615_mcu_add_sta(phy->mt76, vif, sta, enable,
-                                   MCU_EXT_CMD_STA_REC_UPDATE);
+                                   MCU_EXT_CMD_STA_REC_UPDATE, false);
+}
+
+static int
+mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev,
+                               struct ieee80211_vif *vif,
+                               struct ieee80211_sta *sta)
+{
+       struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+
+       return mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76,
+                                                   vif, &msta->wcid,
+                                                   MCU_EXT_CMD_STA_REC_UPDATE);
 }
 
 static const struct mt7615_mcu_ops sta_update_ops = {
@@ -1152,27 +1196,9 @@ static const struct mt7615_mcu_ops sta_update_ops = {
        .sta_add = mt7615_mcu_add_sta,
        .set_drv_ctrl = mt7615_mcu_drv_pmctrl,
        .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
+       .set_sta_decap_offload = mt7615_mcu_sta_update_hdr_trans,
 };
 
-int mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev,
-                                   struct ieee80211_vif *vif,
-                                   struct ieee80211_sta *sta)
-{
-       struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
-       struct wtbl_req_hdr *wtbl_hdr;
-       struct sk_buff *skb = NULL;
-
-       wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
-                                                 WTBL_SET, NULL, &skb);
-       if (IS_ERR(wtbl_hdr))
-               return PTR_ERR(wtbl_hdr);
-
-       mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, &msta->wcid, NULL, wtbl_hdr);
-
-       return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE,
-                                    true);
-}
-
 static int
 mt7615_mcu_uni_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
 {
@@ -1280,7 +1306,7 @@ mt7615_mcu_uni_add_sta(struct mt7615_phy *phy, struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta, bool enable)
 {
        return __mt7615_mcu_add_sta(phy->mt76, vif, sta, enable,
-                                   MCU_UNI_CMD_STA_REC_UPDATE);
+                                   MCU_UNI_CMD_STA_REC_UPDATE, true);
 }
 
 static int
@@ -1338,6 +1364,18 @@ mt7615_mcu_uni_rx_ba(struct mt7615_dev *dev,
                                     MCU_UNI_CMD_STA_REC_UPDATE, true);
 }
 
+static int
+mt7615_mcu_sta_uni_update_hdr_trans(struct mt7615_dev *dev,
+                                   struct ieee80211_vif *vif,
+                                   struct ieee80211_sta *sta)
+{
+       struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+
+       return mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76,
+                                                   vif, &msta->wcid,
+                                                   MCU_UNI_CMD_STA_REC_UPDATE);
+}
+
 static const struct mt7615_mcu_ops uni_update_ops = {
        .add_beacon_offload = mt7615_mcu_uni_add_beacon_offload,
        .set_pm_state = mt7615_mcu_uni_ctrl_pm_state,
@@ -1348,6 +1386,7 @@ static const struct mt7615_mcu_ops uni_update_ops = {
        .sta_add = mt7615_mcu_uni_add_sta,
        .set_drv_ctrl = mt7615_mcu_lp_drv_pmctrl,
        .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
+       .set_sta_decap_offload = mt7615_mcu_sta_uni_update_hdr_trans,
 };
 
 int mt7615_mcu_restart(struct mt76_dev *dev)
@@ -2322,14 +2361,12 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
        return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
 }
 
-int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index)
+int mt7615_mcu_get_temperature(struct mt7615_dev *dev)
 {
        struct {
                u8 action;
                u8 rsv[3];
-       } req = {
-               .action = index,
-       };
+       } req = {};
 
        return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_GET_TEMP, &req,
                                 sizeof(req), true);
index 202ea23..71719c7 100644 (file)
@@ -229,7 +229,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
                               GFP_KERNEL);
        if (!bus_ops) {
                ret = -ENOMEM;
-               goto error;
+               goto err_free_dev;
        }
 
        bus_ops->rr = mt7615_rr;
@@ -242,17 +242,20 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
        ret = devm_request_irq(mdev->dev, irq, mt7615_irq_handler,
                               IRQF_SHARED, KBUILD_MODNAME, dev);
        if (ret)
-               goto error;
+               goto err_free_dev;
 
        if (is_mt7663(mdev))
                mt76_wr(dev, MT_PCIE_IRQ_ENABLE, 1);
 
        ret = mt7615_register_device(dev);
        if (ret)
-               goto error;
+               goto err_free_irq;
 
        return 0;
-error:
+
+err_free_irq:
+       devm_free_irq(pdev, irq, dev);
+err_free_dev:
        mt76_free_device(&dev->mt76);
 
        return ret;
index 989f05e..d0c64a9 100644 (file)
@@ -20,7 +20,6 @@
                                         MT7615_MAX_INTERFACES)
 
 #define MT7615_PM_TIMEOUT              (HZ / 12)
-#define MT7615_WATCHDOG_TIME           (HZ / 10)
 #define MT7615_HW_SCAN_TIMEOUT         (HZ / 10)
 #define MT7615_RESET_TIMEOUT           (30 * HZ)
 #define MT7615_RATE_RETRY              2
@@ -202,6 +201,7 @@ struct mt7615_phy {
 #define mt7615_mcu_set_pm(dev, ...)    (dev)->mcu_ops->set_pm_state((dev),  __VA_ARGS__)
 #define mt7615_mcu_set_drv_ctrl(dev)   (dev)->mcu_ops->set_drv_ctrl((dev))
 #define mt7615_mcu_set_fw_ctrl(dev)    (dev)->mcu_ops->set_fw_ctrl((dev))
+#define mt7615_mcu_set_sta_decap_offload(dev, ...) (dev)->mcu_ops->set_sta_decap_offload((dev), __VA_ARGS__)
 struct mt7615_mcu_ops {
        int (*add_tx_ba)(struct mt7615_dev *dev,
                         struct ieee80211_ampdu_params *params,
@@ -221,6 +221,9 @@ struct mt7615_mcu_ops {
        int (*set_pm_state)(struct mt7615_dev *dev, int band, int state);
        int (*set_drv_ctrl)(struct mt7615_dev *dev);
        int (*set_fw_ctrl)(struct mt7615_dev *dev);
+       int (*set_sta_decap_offload)(struct mt7615_dev *dev,
+                                    struct ieee80211_vif *vif,
+                                    struct ieee80211_sta *sta);
 };
 
 struct mt7615_dev {
@@ -356,6 +359,7 @@ static inline int mt7622_wmac_init(struct mt7615_dev *dev)
 }
 #endif
 
+int mt7615_thermal_init(struct mt7615_dev *dev);
 int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
                      int irq, const u32 *map);
 u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
@@ -456,6 +460,12 @@ static inline u32 mt7615_tx_mcu_int_mask(struct mt7615_dev *dev)
        return MT_INT_TX_DONE(dev->mt76.q_mcu[MT_MCUQ_WM]->hw_idx);
 }
 
+static inline unsigned long
+mt7615_get_macwork_timeout(struct mt7615_dev *dev)
+{
+       return dev->pm.enable ? HZ / 3 : HZ / 10;
+}
+
 void mt7615_dma_reset(struct mt7615_dev *dev);
 void mt7615_scan_work(struct work_struct *work);
 void mt7615_roc_work(struct work_struct *work);
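
mt7615_get_macwork_timeout() replaces the fixed MT7615_WATCHDOG_TIME (HZ / 10):
with runtime PM enabled the MAC work period stretches to HZ / 3, so the
periodic housekeeping wakes the device a third as often. The call sites
converted throughout this series all follow the same pattern:

	unsigned long timeout = mt7615_get_macwork_timeout(dev);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, timeout);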
@@ -466,7 +476,7 @@ int mt7615_set_channel(struct mt7615_phy *phy);
 void mt7615_init_work(struct mt7615_dev *dev);
 
 int mt7615_mcu_restart(struct mt76_dev *dev);
-void mt7615_update_channel(struct mt76_dev *mdev);
+void mt7615_update_channel(struct mt76_phy *mphy);
 bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask);
 void mt7615_mac_reset_counters(struct mt7615_dev *dev);
 void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy);
@@ -494,7 +504,7 @@ u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg);
 int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val);
 int mt7615_mcu_set_dbdc(struct mt7615_dev *dev);
 int mt7615_mcu_set_eeprom(struct mt7615_dev *dev);
-int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index);
+int mt7615_mcu_get_temperature(struct mt7615_dev *dev);
 int mt7615_mcu_set_tx_power(struct mt7615_phy *phy);
 void mt7615_mcu_exit(struct mt7615_dev *dev);
 void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
@@ -518,9 +528,6 @@ void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
 void mt7615_mac_work(struct work_struct *work);
 void mt7615_txp_skb_unmap(struct mt76_dev *dev,
                          struct mt76_txwi_cache *txwi);
-int mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev,
-                                   struct ieee80211_vif *vif,
-                                   struct ieee80211_sta *sta);
 int mt7615_mcu_set_rx_hdr_trans_blacklist(struct mt7615_dev *dev);
 int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val);
 int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
index ec8ec1a..a2465b4 100644 (file)
@@ -98,7 +98,7 @@ mt7615_led_set_config(struct led_classdev *led_cdev,
        addr = mt7615_reg_map(dev, MT_LED_CTRL);
        mt76_wr(dev, addr, val);
 
-       mt76_connac_pm_unref(&dev->pm);
+       mt76_connac_pm_unref(&dev->mphy, &dev->pm);
 }
 
 static int
@@ -147,8 +147,12 @@ int mt7615_register_device(struct mt7615_dev *dev)
        if (ret)
                return ret;
 
-       ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
-                                  ARRAY_SIZE(mt7615_rates));
+       ret = mt76_register_device(&dev->mt76, true, mt76_rates,
+                                  ARRAY_SIZE(mt76_rates));
+       if (ret)
+               return ret;
+
+       ret = mt7615_thermal_init(dev);
        if (ret)
                return ret;
 
index d7cbef7..da87c02 100644 (file)
@@ -131,20 +131,21 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                          struct mt76_tx_info *tx_info)
 {
        struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
-       struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
        struct ieee80211_key_conf *key = info->control.hw_key;
        int pid, id;
        u8 *txwi = (u8 *)txwi_ptr;
        struct mt76_txwi_cache *t;
+       struct mt7615_sta *msta;
        void *txp;
 
+       msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
        if (!wcid)
                wcid = &dev->mt76.global_wcid;
 
        pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
 
-       if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+       if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) {
                struct mt7615_phy *phy = &dev->phy;
 
                if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
@@ -267,6 +268,7 @@ void mt7615_mac_reset_work(struct work_struct *work)
        struct mt7615_phy *phy2;
        struct mt76_phy *ext_phy;
        struct mt7615_dev *dev;
+       unsigned long timeout;
 
        dev = container_of(work, struct mt7615_dev, reset_work);
        ext_phy = dev->mt76.phy2;
@@ -344,11 +346,11 @@ void mt7615_mac_reset_work(struct work_struct *work)
 
        mt7615_mutex_release(dev);
 
+       timeout = mt7615_get_macwork_timeout(dev);
        ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
-                                    MT7615_WATCHDOG_TIME);
+                                    timeout);
        if (phy2)
                ieee80211_queue_delayed_work(ext_phy->hw,
-                                            &phy2->mt76->mac_work,
-                                            MT7615_WATCHDOG_TIME);
+                                            &phy2->mt76->mac_work, timeout);
 
 }
index 63c081b..6712ad9 100644 (file)
@@ -463,7 +463,9 @@ enum mt7615_reg_base {
 #define MT_LPON_TCR0(_n)               MT_LPON(0x010 + ((_n) * 4))
 #define MT_LPON_TCR2(_n)               MT_LPON(0x0f8 + ((_n) - 2) * 4)
 #define MT_LPON_TCR_MODE               GENMASK(1, 0)
+#define MT_LPON_TCR_READ               GENMASK(1, 0)
 #define MT_LPON_TCR_WRITE              BIT(0)
+#define MT_LPON_TCR_ADJUST             BIT(1)
 
 #define MT_LPON_UTTR0                  MT_LPON(0x018)
 #define MT_LPON_UTTR1                  MT_LPON(0x01c)
index 0518097..03877d8 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+/* SPDX-License-Identifier: ISC */
 /* Copyright (C) 2020 MediaTek Inc.
  *
  * Author: Sean Wang <sean.wang@mediatek.com>
index d1be78b..45c1cd3 100644 (file)
@@ -55,6 +55,7 @@ static int __mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
 {
        struct sdio_func *func = dev->mt76.sdio.func;
        struct mt76_phy *mphy = &dev->mt76.phy;
+       struct mt76_connac_pm *pm = &dev->pm;
        u32 status;
        int ret;
 
@@ -66,37 +67,45 @@ static int __mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
                                 status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
        if (ret < 0) {
                dev_err(dev->mt76.dev, "Cannot get ownership from device");
-               set_bit(MT76_STATE_PM, &mphy->state);
-               sdio_release_host(func);
+       } else {
+               clear_bit(MT76_STATE_PM, &mphy->state);
 
-               return ret;
+               pm->stats.last_wake_event = jiffies;
+               pm->stats.doze_time += pm->stats.last_wake_event -
+                                      pm->stats.last_doze_event;
        }
-
        sdio_release_host(func);
-       dev->pm.last_activity = jiffies;
 
-       return 0;
+       return ret;
 }
 
 static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
 {
        struct mt76_phy *mphy = &dev->mt76.phy;
+       int ret = 0;
 
-       if (test_and_clear_bit(MT76_STATE_PM, &mphy->state))
-               return __mt7663s_mcu_drv_pmctrl(dev);
+       mutex_lock(&dev->pm.mutex);
 
-       return 0;
+       if (test_bit(MT76_STATE_PM, &mphy->state))
+               ret = __mt7663s_mcu_drv_pmctrl(dev);
+
+       mutex_unlock(&dev->pm.mutex);
+
+       return ret;
 }
 
 static int mt7663s_mcu_fw_pmctrl(struct mt7615_dev *dev)
 {
        struct sdio_func *func = dev->mt76.sdio.func;
        struct mt76_phy *mphy = &dev->mt76.phy;
+       struct mt76_connac_pm *pm = &dev->pm;
+       int ret = 0;
        u32 status;
-       int ret;
 
-       if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
-               return 0;
+       mutex_lock(&pm->mutex);
+
+       if (mt76_connac_skip_fw_pmctrl(mphy, pm))
+               goto out;
 
        sdio_claim_host(func);
 
@@ -107,9 +116,15 @@ static int mt7663s_mcu_fw_pmctrl(struct mt7615_dev *dev)
        if (ret < 0) {
                dev_err(dev->mt76.dev, "Cannot set ownership to device");
                clear_bit(MT76_STATE_PM, &mphy->state);
+       } else {
+               pm->stats.last_doze_event = jiffies;
+               pm->stats.awake_time += pm->stats.last_doze_event -
+                                       pm->stats.last_wake_event;
        }
 
        sdio_release_host(func);
+out:
+       mutex_unlock(&pm->mutex);
 
        return ret;
 }
index 4393dd2..04f4c89 100644 (file)
@@ -283,9 +283,15 @@ void mt7663s_txrx_worker(struct mt76_worker *w)
 {
        struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
                                              txrx_worker);
-       struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
+       struct mt76_dev *mdev = container_of(sdio, struct mt76_dev, sdio);
+       struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
        int i, nframes, ret;
 
+       if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
+               queue_work(mdev->wq, &dev->pm.wake_work);
+               return;
+       }
+
        /* disable interrupt */
        sdio_claim_host(sdio->func);
        sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
@@ -295,16 +301,16 @@ void mt7663s_txrx_worker(struct mt76_worker *w)
 
                /* tx */
                for (i = 0; i <= MT_TXQ_PSD; i++) {
-                       ret = mt7663s_tx_run_queue(dev, dev->phy.q_tx[i]);
+                       ret = mt7663s_tx_run_queue(mdev, mdev->phy.q_tx[i]);
                        if (ret > 0)
                                nframes += ret;
                }
-               ret = mt7663s_tx_run_queue(dev, dev->q_mcu[MT_MCUQ_WM]);
+               ret = mt7663s_tx_run_queue(mdev, mdev->q_mcu[MT_MCUQ_WM]);
                if (ret > 0)
                        nframes += ret;
 
                /* rx */
-               ret = mt7663s_rx_handler(dev);
+               ret = mt7663s_rx_handler(mdev);
                if (ret > 0)
                        nframes += ret;
        } while (nframes > 0);
@@ -312,6 +318,8 @@ void mt7663s_txrx_worker(struct mt76_worker *w)
        /* enable interrupt */
        sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
        sdio_release_host(sdio->func);
+
+       mt76_connac_pm_unref(&dev->mphy, &dev->pm);
 }
 
 void mt7663s_sdio_irq(struct sdio_func *func)
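
mt7663s_txrx_worker() now takes part in the connac runtime-PM reference
counting; a condensed sketch of the guard pattern it follows (tx/rx processing
elided):

	if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
		/* device is asleep: kick the wake worker and bail out;
		 * the worker is rescheduled once the device is awake
		 */
		queue_work(mdev->wq, &dev->pm.wake_work);
		return;
	}

	/* ... SDIO tx/rx runs here with the device held awake ... */

	mt76_connac_pm_unref(&dev->mphy, &dev->pm);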
index f8d3673..996d48c 100644 (file)
@@ -123,7 +123,7 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
        idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
        addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
 
-       mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
+       mt76_rmw(dev, addr, MT_LPON_TCR_MODE, MT_LPON_TCR_READ); /* TSF read */
        val = mt76_rr(dev, MT_LPON_UTTR0);
        sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset;
 
@@ -191,14 +191,15 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                                   struct ieee80211_sta *sta,
                                   struct mt76_tx_info *tx_info)
 {
-       struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
        struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
        struct sk_buff *skb = tx_info->skb;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct mt7615_sta *msta;
        int pad;
 
+       msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
        if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) &&
-           !msta->rate_probe) {
+           msta && !msta->rate_probe) {
                /* request to configure sampling rate */
                spin_lock_bh(&dev->mt76.lock);
                mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0],
@@ -323,8 +324,8 @@ int mt7663_usb_sdio_register_device(struct mt7615_dev *dev)
                        hw->max_tx_fragments = 1;
        }
 
-       err = mt76_register_device(&dev->mt76, true, mt7615_rates,
-                                  ARRAY_SIZE(mt7615_rates));
+       err = mt76_register_device(&dev->mt76, true, mt76_rates,
+                                  ARRAY_SIZE(mt76_rates));
        if (err < 0)
                return err;
 
index 6c889b9..f49d97d 100644 (file)
@@ -7,12 +7,13 @@
 #include "mt76.h"
 
 #define MT76_CONNAC_SCAN_IE_LEN                        600
-#define MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL    10
+#define MT76_CONNAC_MAX_NUM_SCHED_SCAN_INTERVAL         10
+#define MT76_CONNAC_MAX_TIME_SCHED_SCAN_INTERVAL U16_MAX
 #define MT76_CONNAC_MAX_SCHED_SCAN_SSID                10
 #define MT76_CONNAC_MAX_SCAN_MATCH             16
 
 #define MT76_CONNAC_COREDUMP_TIMEOUT           (HZ / 20)
-#define MT76_CONNAC_COREDUMP_SZ                        (128 * 1024)
+#define MT76_CONNAC_COREDUMP_SZ                        (1300 * 1024)
 
 enum {
        CMD_CBW_20MHZ = IEEE80211_STA_RX_BW_20,
@@ -45,6 +46,8 @@ enum {
 
 struct mt76_connac_pm {
        bool enable;
+       bool ds_enable;
+       bool suspended;
 
        spinlock_t txq_lock;
        struct {
@@ -116,19 +119,27 @@ out:
 }
 
 static inline void
-mt76_connac_pm_unref(struct mt76_connac_pm *pm)
+mt76_connac_pm_unref(struct mt76_phy *phy, struct mt76_connac_pm *pm)
 {
        spin_lock_bh(&pm->wake.lock);
-       pm->wake.count--;
+
        pm->last_activity = jiffies;
+       if (--pm->wake.count == 0 &&
+           test_bit(MT76_STATE_MCU_RUNNING, &phy->state))
+               mt76_connac_power_save_sched(phy, pm);
+
        spin_unlock_bh(&pm->wake.lock);
 }
 
 static inline bool
 mt76_connac_skip_fw_pmctrl(struct mt76_phy *phy, struct mt76_connac_pm *pm)
 {
+       struct mt76_dev *dev = phy->dev;
        bool ret;
 
+       if (dev->token_count)
+               return true;
+
        spin_lock_bh(&pm->wake.lock);
        ret = pm->wake.count || test_and_set_bit(MT76_STATE_PM, &phy->state);
        spin_unlock_bh(&pm->wake.lock);
index 6f180c9..af43bcb 100644 (file)
@@ -10,13 +10,16 @@ int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm)
        if (!pm->enable)
                return 0;
 
-       if (!mt76_is_mmio(dev))
+       if (mt76_is_usb(dev))
                return 0;
 
        cancel_delayed_work_sync(&pm->ps_work);
        if (!test_bit(MT76_STATE_PM, &phy->state))
                return 0;
 
+       if (pm->suspended)
+               return 0;
+
        queue_work(dev->wq, &pm->wake_work);
        if (!wait_event_timeout(pm->wait,
                                !test_bit(MT76_STATE_PM, &phy->state),
@@ -34,12 +37,15 @@ void mt76_connac_power_save_sched(struct mt76_phy *phy,
 {
        struct mt76_dev *dev = phy->dev;
 
-       if (!mt76_is_mmio(dev))
+       if (mt76_is_usb(dev))
                return;
 
        if (!pm->enable)
                return;
 
+       if (pm->suspended)
+               return;
+
        pm->last_activity = jiffies;
 
        if (!test_bit(MT76_STATE_PM, &phy->state)) {
index 6195616..5c3a81e 100644 (file)
@@ -304,7 +304,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_wtbl_req);
 void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
                                   struct ieee80211_vif *vif,
                                   struct ieee80211_sta *sta,
-                                  bool enable)
+                                  bool enable, bool newly)
 {
        struct sta_rec_basic *basic;
        struct tlv *tlv;
@@ -316,7 +316,8 @@ void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
        basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);
 
        if (enable) {
-               basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
+               if (newly)
+                       basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
                basic->conn_state = CONN_STATE_PORT_SECURE;
        } else {
                basic->conn_state = CONN_STATE_DISCONNECT;
@@ -393,6 +394,7 @@ mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
 }
 
 void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
+                                       struct ieee80211_vif *vif,
                                        struct mt76_wcid *wcid,
                                        void *sta_wtbl, void *wtbl_tlv)
 {
@@ -404,9 +406,46 @@ void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
                                             wtbl_tlv, sta_wtbl);
        htr = (struct wtbl_hdr_trans *)tlv;
        htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
+
+       if (vif->type == NL80211_IFTYPE_STATION)
+               htr->to_ds = true;
+       else
+               htr->from_ds = true;
+
+       if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) {
+               htr->to_ds = true;
+               htr->from_ds = true;
+       }
 }
 EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_hdr_trans_tlv);
 
+int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
+                                        struct ieee80211_vif *vif,
+                                        struct mt76_wcid *wcid, int cmd)
+{
+       struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+       struct wtbl_req_hdr *wtbl_hdr;
+       struct tlv *sta_wtbl;
+       struct sk_buff *skb;
+
+       skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
+                                          sizeof(struct tlv));
+
+       wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid, WTBL_SET,
+                                                 sta_wtbl, &skb);
+       if (IS_ERR(wtbl_hdr))
+               return PTR_ERR(wtbl_hdr);
+
+       mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, sta_wtbl, wtbl_hdr);
+
+       return mt76_mcu_skb_send_msg(dev, skb, cmd, true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_update_hdr_trans);
+
 void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
                                      struct sk_buff *skb,
                                      struct ieee80211_vif *vif,
@@ -671,7 +710,7 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
 void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
                             struct ieee80211_sta *sta,
                             struct ieee80211_vif *vif,
-                            u8 rcpi)
+                            u8 rcpi, u8 sta_state)
 {
        struct cfg80211_chan_def *chandef = &mphy->chandef;
        enum nl80211_band band = chandef->chan->band;
@@ -736,7 +775,7 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
 
        tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_STATE, sizeof(*state));
        state = (struct sta_rec_state *)tlv;
-       state->state = 2;
+       state->state = sta_state;
 
        if (sta->vht_cap.vht_supported) {
                state->vht_opmode = sta->bandwidth;
@@ -828,8 +867,8 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
 }
 EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ht_tlv);
 
-int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
-                               struct mt76_sta_cmd_info *info)
+int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
+                           struct mt76_sta_cmd_info *info)
 {
        struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
        struct mt76_dev *dev = phy->dev;
@@ -841,10 +880,13 @@ int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
        if (IS_ERR(skb))
                return PTR_ERR(skb);
 
-       mt76_connac_mcu_sta_basic_tlv(skb, info->vif, info->sta, info->enable);
-       if (info->enable && info->sta)
-               mt76_connac_mcu_sta_tlv(phy, skb, info->sta, info->vif,
-                                       info->rcpi);
+       if (info->sta || !info->offload_fw)
+               mt76_connac_mcu_sta_basic_tlv(skb, info->vif, info->sta,
+                                             info->enable, info->newly);
+       if (info->sta && info->enable)
+               mt76_connac_mcu_sta_tlv(phy, skb, info->sta,
+                                       info->vif, info->rcpi,
+                                       info->state);
 
        sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
                                           sizeof(struct tlv));
@@ -859,6 +901,8 @@ int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
                mt76_connac_mcu_wtbl_generic_tlv(dev, skb, info->vif,
                                                 info->sta, sta_wtbl,
                                                 wtbl_hdr);
+               mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, info->vif, info->wcid,
+                                                  sta_wtbl, wtbl_hdr);
                if (info->sta)
                        mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta,
                                                    sta_wtbl, wtbl_hdr);
@@ -866,7 +910,7 @@ int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
 
        return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
 }
-EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_sta_cmd);
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_cmd);
 
 void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
                                 struct ieee80211_ampdu_params *params,
@@ -895,8 +939,10 @@ void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
                ba->rst_ba_sb = 1;
        }
 
-       if (is_mt7921(dev))
+       if (is_mt7921(dev)) {
+               ba->ba_winsize = enable ? cpu_to_le16(params->buf_size) : 0;
                return;
+       }
 
        if (enable && tx) {
                u8 ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
@@ -1271,6 +1317,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
                                u8 pad[3];
                        } __packed hdr;
                        struct bss_info_uni_he he;
+                       struct bss_info_uni_bss_color bss_color;
                } he_req = {
                        .hdr = {
                                .bss_idx = mvif->idx,
@@ -1279,8 +1326,21 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
                                .tag = cpu_to_le16(UNI_BSS_INFO_HE_BASIC),
                                .len = cpu_to_le16(sizeof(struct bss_info_uni_he)),
                        },
+                       .bss_color = {
+                               .tag = cpu_to_le16(UNI_BSS_INFO_BSS_COLOR),
+                               .len = cpu_to_le16(sizeof(struct bss_info_uni_bss_color)),
+                               .enable = 0,
+                               .bss_color = 0,
+                       },
                };
 
+               if (enable) {
+                       he_req.bss_color.enable =
+                               vif->bss_conf.he_bss_color.enabled;
+                       he_req.bss_color.bss_color =
+                               vif->bss_conf.he_bss_color.color;
+               }
+
                mt76_connac_mcu_uni_bss_he_tlv(phy, vif,
                                               (struct tlv *)&he_req.he);
                err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD_BSS_INFO_UPDATE,
@@ -1463,14 +1523,16 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
        req->version = 1;
        req->seq_num = mvif->scan_seq_num | ext_phy << 7;
 
-       if (is_mt7663(phy->dev) &&
-           (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)) {
-               get_random_mask_addr(req->mt7663.random_mac, sreq->mac_addr,
-                                    sreq->mac_addr_mask);
+       if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+               u8 *addr = is_mt7663(phy->dev) ? req->mt7663.random_mac
+                                              : req->mt7921.random_mac;
+
                req->scan_func = 1;
-       } else if (is_mt7921(phy->dev)) {
-               req->mt7921.bss_idx = mvif->idx;
+               get_random_mask_addr(addr, sreq->mac_addr,
+                                    sreq->mac_addr_mask);
        }
+       if (is_mt7921(phy->dev))
+               req->mt7921.bss_idx = mvif->idx;
 
        req->ssids_num = sreq->n_ssids;
        for (i = 0; i < req->ssids_num; i++) {
@@ -1556,6 +1618,26 @@ int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable)
 }
 EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_deep_sleep);
 
+int mt76_connac_sta_state_dp(struct mt76_dev *dev,
+                            enum ieee80211_sta_state old_state,
+                            enum ieee80211_sta_state new_state)
+{
+       if ((old_state == IEEE80211_STA_ASSOC &&
+            new_state == IEEE80211_STA_AUTHORIZED) ||
+           (old_state == IEEE80211_STA_NONE &&
+            new_state == IEEE80211_STA_NOTEXIST))
+               mt76_connac_mcu_set_deep_sleep(dev, true);
+
+       if ((old_state == IEEE80211_STA_NOTEXIST &&
+            new_state == IEEE80211_STA_NONE) ||
+           (old_state == IEEE80211_STA_AUTHORIZED &&
+            new_state == IEEE80211_STA_ASSOC))
+               mt76_connac_mcu_set_deep_sleep(dev, false);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_sta_state_dp);
+
 void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
                                    struct mt76_connac_coredump *coredump)
 {
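
mt76_connac_sta_state_dp() above maps the mac80211 station state walk onto
firmware deep-sleep control; spelled out, the four transitions it acts on are:

	/* NOTEXIST   -> NONE       : disable deep sleep (station appearing)
	 * AUTHORIZED -> ASSOC      : disable deep sleep (teardown starting)
	 * ASSOC      -> AUTHORIZED : enable deep sleep (fully connected)
	 * NONE       -> NOTEXIST   : enable deep sleep (station removed)
	 */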
@@ -1570,6 +1652,60 @@ void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
 }
 EXPORT_SYMBOL_GPL(mt76_connac_mcu_coredump_event);
 
+int mt76_connac_mcu_get_nic_capability(struct mt76_phy *phy)
+{
+       struct mt76_connac_cap_hdr {
+               __le16 n_element;
+               u8 rsv[2];
+       } __packed * hdr;
+       struct sk_buff *skb;
+       int ret, i;
+
+       ret = mt76_mcu_send_and_get_msg(phy->dev, MCU_CMD_GET_NIC_CAPAB, NULL,
+                                       0, true, &skb);
+       if (ret)
+               return ret;
+
+       hdr = (struct mt76_connac_cap_hdr *)skb->data;
+       if (skb->len < sizeof(*hdr)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       skb_pull(skb, sizeof(*hdr));
+
+       for (i = 0; i < le16_to_cpu(hdr->n_element); i++) {
+               struct tlv_hdr {
+                       __le32 type;
+                       __le32 len;
+               } __packed * tlv = (struct tlv_hdr *)skb->data;
+               int len;
+
+               if (skb->len < sizeof(*tlv))
+                       break;
+
+               skb_pull(skb, sizeof(*tlv));
+
+               len = le32_to_cpu(tlv->len);
+               if (skb->len < len)
+                       break;
+
+               switch (le32_to_cpu(tlv->type)) {
+               case MT_NIC_CAP_6G:
+                       phy->cap.has_6ghz = skb->data[0];
+                       break;
+               default:
+                       break;
+               }
+               skb_pull(skb, len);
+       }
+out:
+       dev_kfree_skb(skb);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_get_nic_capability);
+
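
mt76_connac_mcu_get_nic_capability() expects the MCU_CMD_GET_NIC_CAPAB reply
laid out as a count header followed by a TLV stream, and bails out of the loop
as soon as an advertised length would overrun the skb:

	/* Reply layout (little-endian):
	 *   __le16 n_element; u8 rsv[2];                  <- cap header
	 *   { __le32 type; __le32 len; u8 data[len]; }    x n_element
	 * Only MT_NIC_CAP_6G is consumed here (its first data byte feeds
	 * phy->cap.has_6ghz); unknown types are skipped over.
	 */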
 static void
 mt76_connac_mcu_build_sku(struct mt76_dev *dev, s8 *sku,
                          struct mt76_power_limits *limits,
@@ -1632,12 +1768,15 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
                142, 144, 149, 151, 153, 155, 157,
                159, 161, 165
        };
+       int i, n_chan, batch_size, idx = 0, tx_power, last_ch;
        struct mt76_connac_sku_tlv sku_tlbv;
-       int i, n_chan, batch_size, idx = 0;
        struct mt76_power_limits limits;
        const u8 *ch_list;
 
        sku_len = is_mt7921(dev) ? sizeof(sku_tlbv) : sizeof(sku_tlbv) - 92;
+       tx_power = 2 * phy->hw->conf.power_level;
+       if (!tx_power)
+               tx_power = 127;
 
        if (band == NL80211_BAND_2GHZ) {
                n_chan = ARRAY_SIZE(chan_list_2ghz);
@@ -1648,39 +1787,48 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
        }
        batch_size = DIV_ROUND_UP(n_chan, batch_len);
 
+       if (!phy->cap.has_5ghz)
+               last_ch = chan_list_2ghz[n_chan - 1];
+       else
+               last_ch = chan_list_5ghz[n_chan - 1];
+
        for (i = 0; i < batch_size; i++) {
-               bool last_msg = i == batch_size - 1;
-               int num_ch = last_msg ? n_chan % batch_len : batch_len;
                struct mt76_connac_tx_power_limit_tlv tx_power_tlv = {
                        .band = band == NL80211_BAND_2GHZ ? 1 : 2,
-                       .n_chan = num_ch,
-                       .last_msg = last_msg,
                };
+               int j, err, msg_len, num_ch;
                struct sk_buff *skb;
-               int j, err, msg_len;
 
+               num_ch = i == batch_size - 1 ? n_chan % batch_len : batch_len;
                msg_len = sizeof(tx_power_tlv) + num_ch * sizeof(sku_tlbv);
                skb = mt76_mcu_msg_alloc(dev, NULL, msg_len);
                if (!skb)
                        return -ENOMEM;
 
+               skb_reserve(skb, sizeof(tx_power_tlv));
+
                BUILD_BUG_ON(sizeof(dev->alpha2) > sizeof(tx_power_tlv.alpha2));
                memcpy(tx_power_tlv.alpha2, dev->alpha2, sizeof(dev->alpha2));
+               tx_power_tlv.n_chan = num_ch;
 
-               skb_put_data(skb, &tx_power_tlv, sizeof(tx_power_tlv));
                for (j = 0; j < num_ch; j++, idx++) {
                        struct ieee80211_channel chan = {
                                .hw_value = ch_list[idx],
                                .band = band,
                        };
 
-                       mt76_get_rate_power_limits(phy, &chan, &limits, 127);
+                       mt76_get_rate_power_limits(phy, &chan, &limits,
+                                                  tx_power);
 
+                       tx_power_tlv.last_msg = ch_list[idx] == last_ch;
                        sku_tlbv.channel = ch_list[idx];
+
                        mt76_connac_mcu_build_sku(dev, sku_tlbv.pwr_limit,
                                                  &limits, band);
                        skb_put_data(skb, &sku_tlbv, sku_len);
                }
+               __skb_push(skb, sizeof(tx_power_tlv));
+               memcpy(skb->data, &tx_power_tlv, sizeof(tx_power_tlv));
 
                err = mt76_mcu_skb_send_msg(dev, skb,
                                            MCU_CMD_SET_RATE_TX_POWER, false);
@@ -1695,11 +1843,20 @@ int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy)
 {
        int err;
 
-       err = mt76_connac_mcu_rate_txpower_band(phy, NL80211_BAND_2GHZ);
-       if (err < 0)
-               return err;
+       if (phy->cap.has_2ghz) {
+               err = mt76_connac_mcu_rate_txpower_band(phy,
+                                                       NL80211_BAND_2GHZ);
+               if (err < 0)
+                       return err;
+       }
+       if (phy->cap.has_5ghz) {
+               err = mt76_connac_mcu_rate_txpower_band(phy,
+                                                       NL80211_BAND_5GHZ);
+               if (err < 0)
+                       return err;
+       }
 
-       return mt76_connac_mcu_rate_txpower_band(phy, NL80211_BAND_5GHZ);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_rate_txpower);
 
@@ -1939,7 +2096,7 @@ mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev,
        ptlv->index = index;
 
        memcpy(ptlv->pattern, pattern->pattern, pattern->pattern_len);
-       memcpy(ptlv->mask, pattern->mask, pattern->pattern_len / 8);
+       memcpy(ptlv->mask, pattern->mask, DIV_ROUND_UP(pattern->pattern_len, 8));
 
        return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_SUSPEND, true);
 }
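
The WOW mask copy above now rounds up: the mask carries one bit per pattern
byte, so a 12-byte pattern needs DIV_ROUND_UP(12, 8) = 2 mask bytes, whereas
the old pattern_len / 8 truncated to 1 and silently dropped the last four
mask bits.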
@@ -1974,14 +2131,17 @@ mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif,
        };
 
        if (wowlan->magic_pkt)
-               req.wow_ctrl_tlv.trigger |= BIT(0);
+               req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_MAGIC;
        if (wowlan->disconnect)
-               req.wow_ctrl_tlv.trigger |= BIT(2);
+               req.wow_ctrl_tlv.trigger |= (UNI_WOW_DETECT_TYPE_DISCONNECT |
+                                            UNI_WOW_DETECT_TYPE_BCN_LOST);
        if (wowlan->nd_config) {
                mt76_connac_mcu_sched_scan_req(phy, vif, wowlan->nd_config);
-               req.wow_ctrl_tlv.trigger |= BIT(5);
+               req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_SCH_SCAN_HIT;
                mt76_connac_mcu_sched_scan_enable(phy, vif, suspend);
        }
+       if (wowlan->n_patterns)
+               req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_BITMAP;
 
        if (mt76_is_mmio(dev))
                req.wow_ctrl_tlv.wakeup_hif = WOW_PCIE;
index a109686..1c73beb 100644 (file)
@@ -559,6 +559,7 @@ enum {
        MCU_CMD_SET_RATE_TX_POWER = MCU_CE_PREFIX | 0x5d,
        MCU_CMD_SCHED_SCAN_ENABLE = MCU_CE_PREFIX | 0x61,
        MCU_CMD_SCHED_SCAN_REQ = MCU_CE_PREFIX | 0x62,
+       MCU_CMD_GET_NIC_CAPAB = MCU_CE_PREFIX | 0x8a,
        MCU_CMD_REG_WRITE = MCU_CE_PREFIX | 0xc0,
        MCU_CMD_REG_READ = MCU_CE_PREFIX | MCU_QUERY_MASK | 0xc0,
        MCU_CMD_CHIP_CONFIG = MCU_CE_PREFIX | 0xca,
@@ -575,6 +576,7 @@ enum {
 enum {
        UNI_BSS_INFO_BASIC = 0,
        UNI_BSS_INFO_RLM = 2,
+       UNI_BSS_INFO_BSS_COLOR = 4,
        UNI_BSS_INFO_HE_BASIC = 5,
        UNI_BSS_INFO_BCN_CONTENT = 7,
        UNI_BSS_INFO_QBSS = 15,
@@ -591,6 +593,36 @@ enum {
 };
 
 enum {
+       MT_NIC_CAP_TX_RESOURCE,
+       MT_NIC_CAP_TX_EFUSE_ADDR,
+       MT_NIC_CAP_COEX,
+       MT_NIC_CAP_SINGLE_SKU,
+       MT_NIC_CAP_CSUM_OFFLOAD,
+       MT_NIC_CAP_HW_VER,
+       MT_NIC_CAP_SW_VER,
+       MT_NIC_CAP_MAC_ADDR,
+       MT_NIC_CAP_PHY,
+       MT_NIC_CAP_MAC,
+       MT_NIC_CAP_FRAME_BUF,
+       MT_NIC_CAP_BEAM_FORM,
+       MT_NIC_CAP_LOCATION,
+       MT_NIC_CAP_MUMIMO,
+       MT_NIC_CAP_BUFFER_MODE_INFO,
+       MT_NIC_CAP_HW_ADIE_VERSION = 0x14,
+       MT_NIC_CAP_ANTSWP = 0x16,
+       MT_NIC_CAP_WFDMA_REALLOC,
+       MT_NIC_CAP_6G,
+};
+
+#define UNI_WOW_DETECT_TYPE_MAGIC              BIT(0)
+#define UNI_WOW_DETECT_TYPE_ANY                        BIT(1)
+#define UNI_WOW_DETECT_TYPE_DISCONNECT         BIT(2)
+#define UNI_WOW_DETECT_TYPE_GTK_REKEY_FAIL     BIT(3)
+#define UNI_WOW_DETECT_TYPE_BCN_LOST           BIT(4)
+#define UNI_WOW_DETECT_TYPE_SCH_SCAN_HIT       BIT(5)
+#define UNI_WOW_DETECT_TYPE_BITMAP             BIT(6)
+
+enum {
        UNI_SUSPEND_MODE_SETTING,
        UNI_SUSPEND_WOW_CTRL,
        UNI_SUSPEND_WOW_GPIO_PARAM,
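In the MT_NIC_CAP_* enum added above, the explicit assignments leave gaps: the
implicit values run 0x0 through 0xe, HW_ADIE_VERSION jumps to 0x14, ANTSWP to
0x16, and the remaining unassigned members continue from there, so
WFDMA_REALLOC lands at 0x17 and 6G at 0x18. A compile-time check of that
numbering:

    #include <assert.h>

    enum {
        CAP_BUFFER_MODE_INFO = 14, /* 15th member, 0x0e */
        CAP_HW_ADIE_VERSION = 0x14,
        CAP_ANTSWP = 0x16,
        CAP_WFDMA_REALLOC,         /* continues: 0x17 */
        CAP_6G,                    /* 0x18 */
    };

    static_assert(CAP_WFDMA_REALLOC == 0x17, "tag numbering");
    static_assert(CAP_6G == 0x18, "tag numbering");

    int main(void) { return 0; }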
@@ -762,7 +794,7 @@ struct mt76_connac_sched_scan_req {
        u8 intervals_num;
        u8 scan_func; /* MT7663: BIT(0) enable random mac address */
        struct mt76_connac_mcu_scan_channel channels[64];
-       __le16 intervals[MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL];
+       __le16 intervals[MT76_CONNAC_MAX_NUM_SCHED_SCAN_INTERVAL];
        union {
                struct {
                        u8 random_mac[ETH_ALEN];
@@ -770,7 +802,9 @@ struct mt76_connac_sched_scan_req {
                } mt7663;
                struct {
                        u8 bss_idx;
-                       u8 pad2[63];
+                       u8 pad2[19];
+                       u8 random_mac[ETH_ALEN];
+                       u8 pad3[38];
                } mt7921;
        };
 } __packed;
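The reshaped mt7921 union member keeps the overall structure size unchanged:
the old bss_idx + pad2[63] pair occupied 64 bytes, and 1 + 19 + 6 + 38 in the
new layout also totals 64, with random_mac now at a fixed offset of 20. A
quick layout check (a standalone mirror of just this member; all fields are
u8, so there is no implicit padding):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct mt7921_part {
        uint8_t bss_idx;
        uint8_t pad2[19];
        uint8_t random_mac[6]; /* ETH_ALEN */
        uint8_t pad3[38];
    };

    static_assert(sizeof(struct mt7921_part) == 64, "size unchanged");
    static_assert(offsetof(struct mt7921_part, random_mac) == 20,
                  "mac offset");

    int main(void) { return 0; }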
@@ -781,6 +815,14 @@ struct mt76_connac_sched_scan_done {
        __le16 pad;
 } __packed;
 
+struct bss_info_uni_bss_color {
+       __le16 tag;
+       __le16 len;
+       u8 enable;
+       u8 bss_color;
+       u8 rsv[2];
+} __packed;
+
 struct bss_info_uni_he {
        __le16 tag;
        __le16 len;
@@ -885,15 +927,24 @@ struct mt76_connac_suspend_tlv {
        u8 pad[5];
 } __packed;
 
+enum mt76_sta_info_state {
+       MT76_STA_INFO_STATE_NONE,
+       MT76_STA_INFO_STATE_AUTH,
+       MT76_STA_INFO_STATE_ASSOC
+};
+
 struct mt76_sta_cmd_info {
        struct ieee80211_sta *sta;
        struct mt76_wcid *wcid;
 
        struct ieee80211_vif *vif;
 
+       bool offload_fw;
        bool enable;
+       bool newly;
        int cmd;
        u8 rcpi;
+       u8 state;
 };
 
 #define MT_SKU_POWER_LIMIT     161
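The new mt76_sta_cmd_info fields thread a firmware station state
(NONE/AUTH/ASSOC) plus an offload_fw/newly pair through the common station
command path. A rough sketch of how a driver might derive the three-step state
from mac80211-style flags (this mapping is assumed for illustration, not taken
from the patch):

    enum mt76_sta_info_state {
        MT76_STA_INFO_STATE_NONE,
        MT76_STA_INFO_STATE_AUTH,
        MT76_STA_INFO_STATE_ASSOC,
    };

    static enum mt76_sta_info_state
    sta_state(int has_sta, int authorized)
    {
        if (!has_sta)
            return MT76_STA_INFO_STATE_NONE;
        return authorized ? MT76_STA_INFO_STATE_ASSOC
                          : MT76_STA_INFO_STATE_AUTH;
    }

    int main(void)
    {
        return sta_state(1, 1) == MT76_STA_INFO_STATE_ASSOC ? 0 : 1;
    }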
@@ -963,18 +1014,23 @@ int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy);
 int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif);
 void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
                                   struct ieee80211_vif *vif,
-                                  struct ieee80211_sta *sta, bool enable);
+                                  struct ieee80211_sta *sta, bool enable,
+                                  bool newly);
 void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
                                      struct ieee80211_vif *vif,
                                      struct ieee80211_sta *sta, void *sta_wtbl,
                                      void *wtbl_tlv);
 void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
+                                       struct ieee80211_vif *vif,
                                        struct mt76_wcid *wcid,
                                        void *sta_wtbl, void *wtbl_tlv);
+int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
+                                        struct ieee80211_vif *vif,
+                                        struct mt76_wcid *wcid, int cmd);
 void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
                             struct ieee80211_sta *sta,
                             struct ieee80211_vif *vif,
-                            u8 rcpi);
+                            u8 rcpi, u8 state);
 void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
                                 struct ieee80211_sta *sta, void *sta_wtbl,
                                 void *wtbl_tlv);
@@ -996,8 +1052,8 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
                                struct ieee80211_vif *vif,
                                struct mt76_wcid *wcid,
                                bool enable);
-int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
-                               struct mt76_sta_cmd_info *info);
+int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
+                           struct mt76_sta_cmd_info *info);
 void mt76_connac_mcu_beacon_loss_iter(void *priv, u8 *mac,
                                      struct ieee80211_vif *vif);
 int mt76_connac_mcu_set_rts_thresh(struct mt76_dev *dev, u32 val, u8 band);
@@ -1008,6 +1064,7 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
 int mt76_connac_mcu_start_patch(struct mt76_dev *dev);
 int mt76_connac_mcu_patch_sem_ctrl(struct mt76_dev *dev, bool get);
 int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option);
+int mt76_connac_mcu_get_nic_capability(struct mt76_phy *phy);
 
 int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
                            struct ieee80211_scan_request *scan_req);
@@ -1028,6 +1085,9 @@ int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
 int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend);
 void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac,
                                      struct ieee80211_vif *vif);
+int mt76_connac_sta_state_dp(struct mt76_dev *dev,
+                            enum ieee80211_sta_state old_state,
+                            enum ieee80211_sta_state new_state);
 int mt76_connac_mcu_chip_config(struct mt76_dev *dev);
 int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable);
 void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
index dd66fd1..cea2421 100644 (file)
@@ -68,7 +68,7 @@ static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
                nic_conf1 &= 0xff00;
 
        if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
-               dev_err(dev->mt76.dev,
+               dev_dbg(dev->mt76.dev,
                        "driver does not support HW RF ctrl\n");
 
        if (!mt76x02_field_valid(nic_conf0 >> 8))
index 0da3786..c32e6dc 100644 (file)
@@ -34,24 +34,24 @@ mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
 {
        memset(key_data, 0, 32);
        if (!key)
-               return MT_CIPHER_NONE;
+               return MT76X02_CIPHER_NONE;
 
        if (key->keylen > 32)
-               return MT_CIPHER_NONE;
+               return MT76X02_CIPHER_NONE;
 
        memcpy(key_data, key->key, key->keylen);
 
        switch (key->cipher) {
        case WLAN_CIPHER_SUITE_WEP40:
-               return MT_CIPHER_WEP40;
+               return MT76X02_CIPHER_WEP40;
        case WLAN_CIPHER_SUITE_WEP104:
-               return MT_CIPHER_WEP104;
+               return MT76X02_CIPHER_WEP104;
        case WLAN_CIPHER_SUITE_TKIP:
-               return MT_CIPHER_TKIP;
+               return MT76X02_CIPHER_TKIP;
        case WLAN_CIPHER_SUITE_CCMP:
-               return MT_CIPHER_AES_CCMP;
+               return MT76X02_CIPHER_AES_CCMP;
        default:
-               return MT_CIPHER_NONE;
+               return MT76X02_CIPHER_NONE;
        }
 }
 
@@ -63,7 +63,7 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
        u32 val;
 
        cipher = mt76x02_mac_get_key_info(key, key_data);
-       if (cipher == MT_CIPHER_NONE && key)
+       if (cipher == MT76X02_CIPHER_NONE && key)
                return -EOPNOTSUPP;
 
        val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
@@ -91,10 +91,10 @@ void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
        eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);
 
        pn = (u64)eiv << 16;
-       if (cipher == MT_CIPHER_TKIP) {
+       if (cipher == MT76X02_CIPHER_TKIP) {
                pn |= (iv >> 16) & 0xff;
                pn |= (iv & 0xff) << 8;
-       } else if (cipher >= MT_CIPHER_AES_CCMP) {
+       } else if (cipher >= MT76X02_CIPHER_AES_CCMP) {
                pn |= iv & 0xffff;
        } else {
                return;
@@ -112,7 +112,7 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
        u64 pn;
 
        cipher = mt76x02_mac_get_key_info(key, key_data);
-       if (cipher == MT_CIPHER_NONE && key)
+       if (cipher == MT76X02_CIPHER_NONE && key)
                return -EOPNOTSUPP;
 
        mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
@@ -126,16 +126,16 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
                pn = atomic64_read(&key->tx_pn);
 
                iv_data[3] = key->keyidx << 6;
-               if (cipher >= MT_CIPHER_TKIP) {
+               if (cipher >= MT76X02_CIPHER_TKIP) {
                        iv_data[3] |= 0x20;
                        put_unaligned_le32(pn >> 16, &iv_data[4]);
                }
 
-               if (cipher == MT_CIPHER_TKIP) {
+               if (cipher == MT76X02_CIPHER_TKIP) {
                        iv_data[0] = (pn >> 8) & 0xff;
                        iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
                        iv_data[2] = pn & 0xff;
-               } else if (cipher >= MT_CIPHER_AES_CCMP) {
+               } else if (cipher >= MT76X02_CIPHER_AES_CCMP) {
                        put_unaligned_le16((pn & 0xffff), &iv_data[0]);
                }
        }
@@ -1022,12 +1022,12 @@ void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
                mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]);
 }
 
-void mt76x02_update_channel(struct mt76_dev *mdev)
+void mt76x02_update_channel(struct mt76_phy *mphy)
 {
-       struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+       struct mt76x02_dev *dev = container_of(mphy->dev, struct mt76x02_dev, mt76);
        struct mt76_channel_state *state;
 
-       state = mdev->phy.chan_state;
+       state = mphy->chan_state;
        state->cc_busy += mt76_rr(dev, MT_CH_BUSY);
 
        spin_lock_bh(&dev->mt76.cc_lock);
@@ -1169,7 +1169,7 @@ void mt76x02_mac_work(struct work_struct *work)
 
        mutex_lock(&dev->mt76.mutex);
 
-       mt76_update_survey(&dev->mt76);
+       mt76_update_survey(&dev->mphy);
        for (i = 0, idx = 0; i < 16; i++) {
                u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
 
index 0cfbaca..5dc6c83 100644 (file)
@@ -195,7 +195,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
                            struct ieee80211_sta *sta, int len);
 void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
 void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
-void mt76x02_update_channel(struct mt76_dev *mdev);
+void mt76x02_update_channel(struct mt76_phy *mphy);
 void mt76x02_mac_work(struct work_struct *work);
 
 void mt76x02_mac_cc_reset(struct mt76x02_dev *dev);
index 3e72227..fa7872a 100644 (file)
@@ -692,15 +692,15 @@ struct mt76_wcid_key {
 } __packed __aligned(4);
 
 enum mt76x02_cipher_type {
-       MT_CIPHER_NONE,
-       MT_CIPHER_WEP40,
-       MT_CIPHER_WEP104,
-       MT_CIPHER_TKIP,
-       MT_CIPHER_AES_CCMP,
-       MT_CIPHER_CKIP40,
-       MT_CIPHER_CKIP104,
-       MT_CIPHER_CKIP128,
-       MT_CIPHER_WAPI,
+       MT76X02_CIPHER_NONE,
+       MT76X02_CIPHER_WEP40,
+       MT76X02_CIPHER_WEP104,
+       MT76X02_CIPHER_TKIP,
+       MT76X02_CIPHER_AES_CCMP,
+       MT76X02_CIPHER_CKIP40,
+       MT76X02_CIPHER_CKIP104,
+       MT76X02_CIPHER_CKIP128,
+       MT76X02_CIPHER_WAPI,
 };
 
 #endif
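Prefixing the mt76x02 cipher enumerators avoids a name clash with the generic
MT_CIPHER_* values used by newer chips (see the mt7915 rx-path hunk further
down): in C, two enums visible in the same scope may not declare the same
enumerator. A minimal reproduction of the conflict the rename prevents:

    /* before the rename, pulling in both headers amounted to this,
     * which does not compile: */
    enum a { MT_CIPHER_NONE, MT_CIPHER_WEP40 };
    /* enum b { MT_CIPHER_NONE }; -- error: redeclared enumerator */

    /* after the rename the two sets coexist: */
    enum mt76x02_cipher { MT76X02_CIPHER_NONE, MT76X02_CIPHER_WEP40 };

    int main(void) { return MT76X02_CIPHER_NONE; }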
index 02db5d6..ccdbab3 100644 (file)
@@ -7,24 +7,18 @@
 #include <linux/module.h>
 #include "mt76x02.h"
 
-#define CCK_RATE(_idx, _rate) {                                        \
+#define MT76x02_CCK_RATE(_idx, _rate) {                                \
        .bitrate = _rate,                                       \
        .flags = IEEE80211_RATE_SHORT_PREAMBLE,                 \
        .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),            \
        .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + (_idx)),        \
 }
 
-#define OFDM_RATE(_idx, _rate) {                               \
-       .bitrate = _rate,                                       \
-       .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),           \
-       .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),     \
-}
-
 struct ieee80211_rate mt76x02_rates[] = {
-       CCK_RATE(0, 10),
-       CCK_RATE(1, 20),
-       CCK_RATE(2, 55),
-       CCK_RATE(3, 110),
+       MT76x02_CCK_RATE(0, 10),
+       MT76x02_CCK_RATE(1, 20),
+       MT76x02_CCK_RATE(2, 55),
+       MT76x02_CCK_RATE(3, 110),
        OFDM_RATE(0, 60),
        OFDM_RATE(1, 90),
        OFDM_RATE(2, 120),
index 40c8061..80e4924 100644 (file)
@@ -1,4 +1,4 @@
-#SPDX-License-Identifier: ISC
+# SPDX-License-Identifier: ISC
 
 obj-$(CONFIG_MT7915E) += mt7915e.o
 
index 6a8ddee..6404824 100644 (file)
@@ -3,6 +3,7 @@
 
 #include "mt7915.h"
 #include "eeprom.h"
+#include "mcu.h"
 
 /** global debugfs **/
 
@@ -16,7 +17,7 @@ mt7915_implicit_txbf_set(void *data, u64 val)
 
        dev->ibf = !!val;
 
-       return mt7915_mcu_set_txbf_type(dev);
+       return mt7915_mcu_set_txbf(dev, MT_BF_TYPE_UPDATE);
 }
 
 static int
@@ -147,6 +148,9 @@ mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s)
 {
        struct mt7915_dev *dev = s->private;
        bool ext_phy = phy != &dev->phy;
+       static const char * const bw[] = {
+               "BW20", "BW40", "BW80", "BW160"
+       };
        int cnt;
 
        if (!phy)
@@ -164,11 +168,16 @@ mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s)
        seq_puts(s, "Tx Beamformer Rx feedback statistics: ");
 
        cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(ext_phy));
-       seq_printf(s, "All: %ld, HE: %ld, VHT: %ld, HT: %ld\n",
+       seq_printf(s, "All: %ld, HE: %ld, VHT: %ld, HT: %ld",
                   FIELD_GET(MT_ETBF_RX_FB_ALL, cnt),
                   FIELD_GET(MT_ETBF_RX_FB_HE, cnt),
                   FIELD_GET(MT_ETBF_RX_FB_VHT, cnt),
                   FIELD_GET(MT_ETBF_RX_FB_HT, cnt));
+       cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(ext_phy));
+       seq_printf(s, "%s, NC: %ld, NR: %ld\n",
+                  bw[FIELD_GET(MT_ETBF_RX_FB_BW, cnt)],
+                  FIELD_GET(MT_ETBF_RX_FB_NC, cnt),
+                  FIELD_GET(MT_ETBF_RX_FB_NR, cnt));
 
        /* Tx Beamformee Rx NDPA & Tx feedback report */
        cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(ext_phy));
@@ -204,7 +213,7 @@ mt7915_tx_stats_show(struct seq_file *file, void *data)
        mt7915_txbf_stat_read_phy(mt7915_ext_phy(dev), file);
 
        /* Tx amsdu info */
-       seq_puts(file, "Tx MSDU stat:\n");
+       seq_puts(file, "Tx MSDU statistics:\n");
        for (i = 0, n = 0; i < ARRAY_SIZE(stat); i++) {
                stat[i] = mt76_rr(dev,  MT_PLE_AMSDU_PACK_MSDU_CNT(i));
                n += stat[i];
@@ -224,18 +233,6 @@ mt7915_tx_stats_show(struct seq_file *file, void *data)
 
 DEFINE_SHOW_ATTRIBUTE(mt7915_tx_stats);
 
-static int mt7915_read_temperature(struct seq_file *s, void *data)
-{
-       struct mt7915_dev *dev = dev_get_drvdata(s->private);
-       int temp;
-
-       /* cpu */
-       temp = mt7915_mcu_get_temperature(dev, 0);
-       seq_printf(s, "Temperature: %d\n", temp);
-
-       return 0;
-}
-
 static int
 mt7915_queues_acq(struct seq_file *s, void *data)
 {
@@ -307,54 +304,23 @@ mt7915_puts_rate_txpower(struct seq_file *s, struct mt7915_phy *phy)
                "RU26", "RU52", "RU106", "RU242/SU20",
                "RU484/SU40", "RU996/SU80", "RU2x996/SU160"
        };
-       struct mt7915_dev *dev = dev_get_drvdata(s->private);
-       bool ext_phy = phy != &dev->phy;
-       u32 reg_base;
-       int i, idx = 0;
+       s8 txpower[MT7915_SKU_RATE_NUM], *buf;
+       int i;
 
        if (!phy)
                return;
 
-       reg_base = MT_TMAC_FP0R0(ext_phy);
-       seq_printf(s, "\nBand %d\n", ext_phy);
+       seq_printf(s, "\nBand %d\n", phy != &phy->dev->phy);
 
-       for (i = 0; i < ARRAY_SIZE(mt7915_sku_group_len); i++) {
-               u8 cnt, mcs_num = mt7915_sku_group_len[i];
-               s8 txpower[12];
-               int j;
+       mt7915_mcu_get_txpower_sku(phy, txpower, sizeof(txpower));
+       for (i = 0, buf = txpower; i < ARRAY_SIZE(mt7915_sku_group_len); i++) {
+               u8 mcs_num = mt7915_sku_group_len[i];
 
-               if (i == SKU_HT_BW20 || i == SKU_HT_BW40) {
-                       mcs_num = 8;
-               } else if (i >= SKU_VHT_BW20 && i <= SKU_VHT_BW160) {
+               if (i >= SKU_VHT_BW20 && i <= SKU_VHT_BW160)
                        mcs_num = 10;
-               } else if (i == SKU_HE_RU26) {
-                       reg_base = MT_TMAC_FP0R18(ext_phy);
-                       idx = 0;
-               }
-
-               for (j = 0, cnt = 0; j < DIV_ROUND_UP(mcs_num, 4); j++) {
-                       u32 val;
-
-                       if (i == SKU_VHT_BW160 && idx == 60) {
-                               reg_base = MT_TMAC_FP0R15(ext_phy);
-                               idx = 0;
-                       }
-
-                       val = mt76_rr(dev, reg_base + (idx / 4) * 4);
-
-                       if (idx && idx % 4)
-                               val >>= (idx % 4) * 8;
-
-                       while (val > 0 && cnt < mcs_num) {
-                               s8 pwr = FIELD_GET(MT_TMAC_FP_MASK, val);
-
-                               txpower[cnt++] = pwr;
-                               val >>= 8;
-                               idx++;
-                       }
-               }
 
-               mt76_seq_puts_array(s, sku_group_name[i], txpower, mcs_num);
+               mt76_seq_puts_array(s, sku_group_name[i], buf, mcs_num);
+               buf += mt7915_sku_group_len[i];
        }
 }
 
@@ -390,8 +356,6 @@ int mt7915_init_debugfs(struct mt7915_dev *dev)
        debugfs_create_file("radar_trigger", 0200, dir, dev,
                            &fops_radar_trigger);
        debugfs_create_file("ser_trigger", 0200, dir, dev, &fops_ser_trigger);
-       debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir,
-                                   mt7915_read_temperature);
        debugfs_create_devm_seqfile(dev->mt76.dev, "txpower_sku", dir,
                                    mt7915_read_rate_txpower);
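The rewritten txpower dump no longer decodes TMAC registers by hand; it fetches
the whole per-rate table from the MCU once and walks the flat buffer group by
group. Note that buf always advances by the full group length even when fewer
entries are printed (the VHT groups print only 10 of theirs). A sketch of that
walk over a flat buffer (group lengths hypothetical):

    #include <stdio.h>

    static const unsigned int group_len[] = { 4, 8, 12 };
    static signed char txpower[4 + 8 + 12];

    int main(void)
    {
        const signed char *buf = txpower;
        unsigned int i;

        for (i = 0; i < sizeof(group_len) / sizeof(group_len[0]); i++) {
            unsigned int print_num =
                group_len[i] > 10 ? 10 : group_len[i];

            printf("group %u: offset %td, print %u of %u\n",
                   i, buf - txpower, print_num, group_len[i]);
            buf += group_len[i]; /* full stride, not print_num */
        }
        return 0;
    }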
 
index 11d0b76..9182568 100644 (file)
@@ -19,39 +19,6 @@ int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc)
        return 0;
 }
 
-void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-                        struct sk_buff *skb)
-{
-       struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
-       __le32 *rxd = (__le32 *)skb->data;
-       enum rx_pkt_type type;
-
-       type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
-
-       switch (type) {
-       case PKT_TYPE_TXRX_NOTIFY:
-               mt7915_mac_tx_free(dev, skb);
-               break;
-       case PKT_TYPE_RX_EVENT:
-               mt7915_mcu_rx_event(dev, skb);
-               break;
-#ifdef CONFIG_NL80211_TESTMODE
-       case PKT_TYPE_TXRXV:
-               mt7915_mac_fill_rx_vector(dev, skb);
-               break;
-#endif
-       case PKT_TYPE_NORMAL:
-               if (!mt7915_mac_fill_rx(dev, skb)) {
-                       mt76_rx(&dev->mt76, q, skb);
-                       return;
-               }
-               fallthrough;
-       default:
-               dev_kfree_skb(skb);
-               break;
-       }
-}
-
 static void
 mt7915_tx_cleanup(struct mt7915_dev *dev)
 {
@@ -112,8 +79,6 @@ void mt7915_dma_prefetch(struct mt7915_dev *dev)
 
 int mt7915_dma_init(struct mt7915_dev *dev)
 {
-       /* Increase buffer size to receive large VHT/HE MPDUs */
-       int rx_buf_size = MT_RX_BUF_SIZE * 2;
        u32 hif1_ofs = 0;
        int ret;
 
@@ -177,28 +142,28 @@ int mt7915_dma_init(struct mt7915_dev *dev)
        /* event from WM */
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
                               MT7915_RXQ_MCU_WM, MT7915_RX_MCU_RING_SIZE,
-                              rx_buf_size, MT_RX_EVENT_RING_BASE);
+                              MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
        if (ret)
                return ret;
 
        /* event from WA */
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
                               MT7915_RXQ_MCU_WA, MT7915_RX_MCU_RING_SIZE,
-                              rx_buf_size, MT_RX_EVENT_RING_BASE);
+                              MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
        if (ret)
                return ret;
 
        /* rx data queue */
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
                               MT7915_RXQ_BAND0, MT7915_RX_RING_SIZE,
-                              rx_buf_size, MT_RX_DATA_RING_BASE);
+                              MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE);
        if (ret)
                return ret;
 
        if (dev->dbdc_support) {
                ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT],
                                       MT7915_RXQ_BAND1, MT7915_RX_RING_SIZE,
-                                      rx_buf_size,
+                                      MT_RX_BUF_SIZE,
                                       MT_RX_DATA_RING_BASE + hif1_ofs);
                if (ret)
                        return ret;
@@ -207,7 +172,7 @@ int mt7915_dma_init(struct mt7915_dev *dev)
                ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT_WA],
                                       MT7915_RXQ_MCU_WA_EXT,
                                       MT7915_RX_MCU_RING_SIZE,
-                                      rx_buf_size,
+                                      MT_RX_BUF_SIZE,
                                       MT_RX_EVENT_RING_BASE + hif1_ofs);
                if (ret)
                        return ret;
index 8ededf2..ee3d644 100644 (file)
@@ -4,22 +4,12 @@
 #include "mt7915.h"
 #include "eeprom.h"
 
-static u32 mt7915_eeprom_read(struct mt7915_dev *dev, u32 offset)
-{
-       u8 *data = dev->mt76.eeprom.data;
-
-       if (data[offset] == 0xff && !dev->flash_mode)
-               mt7915_mcu_get_eeprom(dev, offset);
-
-       return data[offset];
-}
-
 static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
 {
        struct mt76_dev *mdev = &dev->mt76;
-       u32 val;
+       u8 *eeprom = mdev->eeprom.data;
+       u32 val = eeprom[MT_EE_DO_PRE_CAL];
 
-       val = mt7915_eeprom_read(dev, MT_EE_DO_PRE_CAL);
        if (val != (MT_EE_WIFI_CAL_DPD | MT_EE_WIFI_CAL_GROUP))
                return 0;
 
@@ -43,7 +33,13 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
                dev->flash_mode = true;
                ret = mt7915_eeprom_load_precal(dev);
        } else {
-               memset(dev->mt76.eeprom.data, -1, MT7915_EEPROM_SIZE);
+               u32 block_num, i;
+
+               block_num = DIV_ROUND_UP(MT7915_EEPROM_SIZE,
+                                        MT7915_EEPROM_BLOCK_SIZE);
+               for (i = 0; i < block_num; i++)
+                       mt7915_mcu_get_eeprom(dev,
+                                             i * MT7915_EEPROM_BLOCK_SIZE);
        }
 
        return ret;
@@ -52,10 +48,7 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
 static int mt7915_check_eeprom(struct mt7915_dev *dev)
 {
        u8 *eeprom = dev->mt76.eeprom.data;
-       u16 val;
-
-       mt7915_eeprom_read(dev, MT_EE_CHIP_ID);
-       val = get_unaligned_le16(eeprom);
+       u16 val = get_unaligned_le16(eeprom);
 
        switch (val) {
        case 0x7915:
@@ -69,9 +62,10 @@ void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
 {
        struct mt7915_dev *dev = phy->dev;
        bool ext_phy = phy != &dev->phy;
+       u8 *eeprom = dev->mt76.eeprom.data;
        u32 val;
 
-       val = mt7915_eeprom_read(dev, MT_EE_WIFI_CONF + ext_phy);
+       val = eeprom[MT_EE_WIFI_CONF + ext_phy];
        val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val);
        if (val == MT_EE_BAND_SEL_DEFAULT && dev->dbdc_support)
                val = ext_phy ? MT_EE_BAND_SEL_5GHZ : MT_EE_BAND_SEL_2GHZ;
@@ -143,6 +137,7 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
                                   struct ieee80211_channel *chan,
                                   u8 chain_idx)
 {
+       u8 *eeprom = dev->mt76.eeprom.data;
        int index, target_power;
        bool tssi_on;
 
@@ -153,18 +148,18 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
 
        if (chan->band == NL80211_BAND_2GHZ) {
                index = MT_EE_TX0_POWER_2G + chain_idx * 3;
-               target_power = mt7915_eeprom_read(dev, index);
+               target_power = eeprom[index];
 
                if (!tssi_on)
-                       target_power += mt7915_eeprom_read(dev, index + 1);
+                       target_power += eeprom[index + 1];
        } else {
                int group = mt7915_get_channel_group(chan->hw_value);
 
                index = MT_EE_TX0_POWER_5G + chain_idx * 12;
-               target_power = mt7915_eeprom_read(dev, index + group);
+               target_power = eeprom[index + group];
 
                if (!tssi_on)
-                       target_power += mt7915_eeprom_read(dev, index + 8);
+                       target_power += eeprom[index + 8];
        }
 
        return target_power;
@@ -172,13 +167,14 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
 
 s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band)
 {
+       u8 *eeprom = dev->mt76.eeprom.data;
        u32 val;
        s8 delta;
 
        if (band == NL80211_BAND_2GHZ)
-               val = mt7915_eeprom_read(dev, MT_EE_RATE_DELTA_2G);
+               val = eeprom[MT_EE_RATE_DELTA_2G];
        else
-               val = mt7915_eeprom_read(dev, MT_EE_RATE_DELTA_5G);
+               val = eeprom[MT_EE_RATE_DELTA_5G];
 
        if (!(val & MT_EE_RATE_DELTA_EN))
                return 0;
index 033fb59..a43389a 100644 (file)
@@ -33,7 +33,7 @@ enum mt7915_eeprom_field {
 #define MT_EE_WIFI_CAL_GROUP                   BIT(0)
 #define MT_EE_WIFI_CAL_DPD                     GENMASK(2, 1)
 #define MT_EE_CAL_UNIT                         1024
-#define MT_EE_CAL_GROUP_SIZE                   (44 * MT_EE_CAL_UNIT)
+#define MT_EE_CAL_GROUP_SIZE                   (49 * MT_EE_CAL_UNIT + 16)
 #define MT_EE_CAL_DPD_SIZE                     (54 * MT_EE_CAL_UNIT)
 
 #define MT_EE_WIFI_CONF0_TX_PATH               GENMASK(2, 0)
@@ -99,12 +99,15 @@ static inline bool
 mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band)
 {
        u8 *eep = dev->mt76.eeprom.data;
+       u8 val = eep[MT_EE_WIFI_CONF + 7];
 
-       /* TODO: DBDC */
-       if (band == NL80211_BAND_5GHZ)
-               return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF7_TSSI0_5G;
+       if (band == NL80211_BAND_2GHZ)
+               return val & MT_EE_WIFI_CONF7_TSSI0_2G;
+
+       if (dev->dbdc_support)
+               return val & MT_EE_WIFI_CONF7_TSSI1_5G;
        else
-               return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF7_TSSI0_2G;
+               return val & MT_EE_WIFI_CONF7_TSSI0_5G;
 }
 
 extern const u8 mt7915_sku_group_len[MAX_SKU_RATE_GROUP_NUM];
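With DBDC handled, the TSSI lookup above now distinguishes three cases: 2 GHz
always tests the TSSI0 2G bit, while 5 GHz tests TSSI1 when the band runs on
the second DBDC slot and TSSI0 otherwise. A condensed sketch of the decision
(bit values hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    #define TSSI0_2G 0x01
    #define TSSI0_5G 0x02
    #define TSSI1_5G 0x04

    static bool tssi_enabled(unsigned char val, bool band_2g, bool dbdc)
    {
        if (band_2g)
            return val & TSSI0_2G;
        return val & (dbdc ? TSSI1_5G : TSSI0_5G);
    }

    int main(void)
    {
        printf("%d\n", tssi_enabled(TSSI1_5G, false, true)); /* 1 */
        return 0;
    }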
index 822f3aa..4798d63 100644 (file)
@@ -2,39 +2,14 @@
 /* Copyright (C) 2020 MediaTek Inc. */
 
 #include <linux/etherdevice.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/thermal.h>
 #include "mt7915.h"
 #include "mac.h"
 #include "mcu.h"
 #include "eeprom.h"
 
-#define CCK_RATE(_idx, _rate) {                                                \
-       .bitrate = _rate,                                               \
-       .flags = IEEE80211_RATE_SHORT_PREAMBLE,                         \
-       .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),                    \
-       .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)),        \
-}
-
-#define OFDM_RATE(_idx, _rate) {                                       \
-       .bitrate = _rate,                                               \
-       .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),                   \
-       .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),             \
-}
-
-static struct ieee80211_rate mt7915_rates[] = {
-       CCK_RATE(0, 10),
-       CCK_RATE(1, 20),
-       CCK_RATE(2, 55),
-       CCK_RATE(3, 110),
-       OFDM_RATE(11, 60),
-       OFDM_RATE(15, 90),
-       OFDM_RATE(10, 120),
-       OFDM_RATE(14, 180),
-       OFDM_RATE(9,  240),
-       OFDM_RATE(13, 360),
-       OFDM_RATE(8,  480),
-       OFDM_RATE(12, 540),
-};
-
 static const struct ieee80211_iface_limit if_limits[] = {
        {
                .max = 1,
@@ -67,6 +42,117 @@ static const struct ieee80211_iface_combination if_comb[] = {
        }
 };
 
+static ssize_t mt7915_thermal_show_temp(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       struct mt7915_phy *phy = dev_get_drvdata(dev);
+       int temperature;
+
+       temperature = mt7915_mcu_get_temperature(phy);
+       if (temperature < 0)
+               return temperature;
+
+       /* display in millidegrees Celsius */
+       return sprintf(buf, "%u\n", temperature * 1000);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, mt7915_thermal_show_temp,
+                         NULL, 0);
+
+static struct attribute *mt7915_hwmon_attrs[] = {
+       &sensor_dev_attr_temp1_input.dev_attr.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(mt7915_hwmon);
+
+static int
+mt7915_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
+                                     unsigned long *state)
+{
+       *state = MT7915_THERMAL_THROTTLE_MAX;
+
+       return 0;
+}
+
+static int
+mt7915_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
+                                     unsigned long *state)
+{
+       struct mt7915_phy *phy = cdev->devdata;
+
+       *state = phy->throttle_state;
+
+       return 0;
+}
+
+static int
+mt7915_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
+                                     unsigned long state)
+{
+       struct mt7915_phy *phy = cdev->devdata;
+       int ret;
+
+       if (state > MT7915_THERMAL_THROTTLE_MAX)
+               return -EINVAL;
+
+       if (state == phy->throttle_state)
+               return 0;
+
+       ret = mt7915_mcu_set_thermal_throttling(phy, state);
+       if (ret)
+               return ret;
+
+       phy->throttle_state = state;
+
+       return 0;
+}
+
+static const struct thermal_cooling_device_ops mt7915_thermal_ops = {
+       .get_max_state = mt7915_thermal_get_max_throttle_state,
+       .get_cur_state = mt7915_thermal_get_cur_throttle_state,
+       .set_cur_state = mt7915_thermal_set_cur_throttle_state,
+};
+
+static void mt7915_unregister_thermal(struct mt7915_phy *phy)
+{
+       struct wiphy *wiphy = phy->mt76->hw->wiphy;
+
+       if (!phy->cdev)
+               return;
+
+       sysfs_remove_link(&wiphy->dev.kobj, "cooling_device");
+       thermal_cooling_device_unregister(phy->cdev);
+}
+
+static int mt7915_thermal_init(struct mt7915_phy *phy)
+{
+       struct wiphy *wiphy = phy->mt76->hw->wiphy;
+       struct thermal_cooling_device *cdev;
+       struct device *hwmon;
+
+       cdev = thermal_cooling_device_register(wiphy_name(wiphy), phy,
+                                              &mt7915_thermal_ops);
+       if (!IS_ERR(cdev)) {
+               if (sysfs_create_link(&wiphy->dev.kobj, &cdev->device.kobj,
+                                     "cooling_device") < 0)
+                       thermal_cooling_device_unregister(cdev);
+               else
+                       phy->cdev = cdev;
+       }
+
+       if (!IS_REACHABLE(CONFIG_HWMON))
+               return 0;
+
+       hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev,
+                                                      wiphy_name(wiphy), phy,
+                                                      mt7915_hwmon_groups);
+       if (IS_ERR(hwmon))
+               return PTR_ERR(hwmon);
+
+       return 0;
+}
+
 static void
 mt7915_init_txpower(struct mt7915_dev *dev,
                    struct ieee80211_supported_band *sband)
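The new hwmon hook follows the sysfs convention of reporting temperatures in
millidegrees Celsius, and negative MCU return values pass straight through as
error codes. A standalone sketch of the show path:

    #include <stdio.h>

    /* hwmon temp*_input attributes are in millidegrees Celsius */
    static int show_temp(char *buf, size_t len, int temp_c)
    {
        if (temp_c < 0)
            return temp_c; /* propagate the MCU error */
        return snprintf(buf, len, "%u\n", temp_c * 1000);
    }

    int main(void)
    {
        char buf[16];

        if (show_temp(buf, sizeof(buf), 45) > 0)
            fputs(buf, stdout); /* prints 45000 */
        return 0;
    }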
@@ -201,7 +287,6 @@ mt7915_mac_init_band(struct mt7915_dev *dev, u8 band)
              FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_MCAST, MT_MDP_TO_HIF);
        mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set);
 
-       mt76_set(dev, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
        mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
 
        mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 1536);
@@ -228,20 +313,19 @@ static int mt7915_txbf_init(struct mt7915_dev *dev)
 {
        int ret;
 
-
        if (dev->dbdc_support) {
-               ret = mt7915_mcu_set_txbf_module(dev);
+               ret = mt7915_mcu_set_txbf(dev, MT_BF_MODULE_UPDATE);
                if (ret)
                        return ret;
        }
 
        /* trigger sounding packets */
-       ret = mt7915_mcu_set_txbf_sounding(dev);
+       ret = mt7915_mcu_set_txbf(dev, MT_BF_SOUNDING_ON);
        if (ret)
                return ret;
 
        /* enable eBF */
-       return mt7915_mcu_set_txbf_type(dev);
+       return mt7915_mcu_set_txbf(dev, MT_BF_TYPE_UPDATE);
 }
 
 static int mt7915_register_ext_phy(struct mt7915_dev *dev)
@@ -281,8 +365,12 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev)
        if (ret)
                goto error;
 
-       ret = mt76_register_phy(mphy, true, mt7915_rates,
-                               ARRAY_SIZE(mt7915_rates));
+       ret = mt76_register_phy(mphy, true, mt76_rates,
+                               ARRAY_SIZE(mt76_rates));
+       if (ret)
+               goto error;
+
+       ret = mt7915_thermal_init(phy);
        if (ret)
                goto error;
 
@@ -480,6 +568,9 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
        if (nss < 2)
                return;
 
+       /* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
+       elem->phy_cap_info[7] |= min_t(int, nss - 1, 2) << 3;
+
        if (vif != NL80211_IFTYPE_AP)
                return;
 
@@ -493,9 +584,6 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
        c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
            IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
        elem->phy_cap_info[6] |= c;
-
-       /* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
-       elem->phy_cap_info[7] |= min_t(int, nss - 1, 2) << 3;
 }
 
 static void
@@ -579,8 +667,6 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
 
                switch (i) {
                case NL80211_IFTYPE_AP:
-                       he_cap_elem->mac_cap_info[0] |=
-                               IEEE80211_HE_MAC_CAP0_TWT_RES;
                        he_cap_elem->mac_cap_info[2] |=
                                IEEE80211_HE_MAC_CAP2_BSR;
                        he_cap_elem->mac_cap_info[4] |=
@@ -594,8 +680,6 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
                                IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
                        break;
                case NL80211_IFTYPE_STATION:
-                       he_cap_elem->mac_cap_info[0] |=
-                               IEEE80211_HE_MAC_CAP0_TWT_REQ;
                        he_cap_elem->mac_cap_info[1] |=
                                IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
 
@@ -690,6 +774,7 @@ static void mt7915_unregister_ext_phy(struct mt7915_dev *dev)
        if (!phy)
                return;
 
+       mt7915_unregister_thermal(phy);
        mt76_unregister_phy(mphy);
        ieee80211_free_hw(mphy->hw);
 }
@@ -731,8 +816,12 @@ int mt7915_register_device(struct mt7915_dev *dev)
        dev->mt76.test_ops = &mt7915_testmode_ops;
 #endif
 
-       ret = mt76_register_device(&dev->mt76, true, mt7915_rates,
-                                  ARRAY_SIZE(mt7915_rates));
+       ret = mt76_register_device(&dev->mt76, true, mt76_rates,
+                                  ARRAY_SIZE(mt76_rates));
+       if (ret)
+               return ret;
+
+       ret = mt7915_thermal_init(&dev->phy);
        if (ret)
                return ret;
 
@@ -748,10 +837,12 @@ int mt7915_register_device(struct mt7915_dev *dev)
 void mt7915_unregister_device(struct mt7915_dev *dev)
 {
        mt7915_unregister_ext_phy(dev);
+       mt7915_unregister_thermal(&dev->phy);
        mt76_unregister_device(&dev->mt76);
        mt7915_mcu_exit(dev);
        mt7915_tx_token_put(dev);
        mt7915_dma_cleanup(dev);
+       tasklet_disable(&dev->irq_tasklet);
 
        mt76_free_device(&dev->mt76);
 }
index 7a9759f..2462704 100644 (file)
@@ -307,7 +307,8 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
        }
 }
 
-int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
+static int
+mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
 {
        struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
        struct mt76_phy *mphy = &dev->mt76.phy;
@@ -412,14 +413,27 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
                u8 *data = (u8 *)rxd;
 
                if (status->flag & RX_FLAG_DECRYPTED) {
-                       status->iv[0] = data[5];
-                       status->iv[1] = data[4];
-                       status->iv[2] = data[3];
-                       status->iv[3] = data[2];
-                       status->iv[4] = data[1];
-                       status->iv[5] = data[0];
-
-                       insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+                       switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
+                       case MT_CIPHER_AES_CCMP:
+                       case MT_CIPHER_CCMP_CCX:
+                       case MT_CIPHER_CCMP_256:
+                               insert_ccmp_hdr =
+                                       FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+                               fallthrough;
+                       case MT_CIPHER_TKIP:
+                       case MT_CIPHER_TKIP_NO_MIC:
+                       case MT_CIPHER_GCMP:
+                       case MT_CIPHER_GCMP_256:
+                               status->iv[0] = data[5];
+                               status->iv[1] = data[4];
+                               status->iv[2] = data[3];
+                               status->iv[3] = data[2];
+                               status->iv[4] = data[1];
+                               status->iv[5] = data[0];
+                               break;
+                       default:
+                               break;
+                       }
                }
                rxd += 4;
                if ((u8 *)rxd - skb->data >= skb->len)
@@ -610,9 +624,10 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
        return 0;
 }
 
-#ifdef CONFIG_NL80211_TESTMODE
-void mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
+static void
+mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
 {
+#ifdef CONFIG_NL80211_TESTMODE
        struct mt7915_phy *phy = &dev->phy;
        __le32 *rxd = (__le32 *)skb->data;
        __le32 *rxv_hdr = rxd + 2;
@@ -650,10 +665,10 @@ void mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
 
        phy->test.last_freq_offset = foe;
        phy->test.last_snr = snr;
+#endif
 
        dev_kfree_skb(skb);
 }
-#endif
 
 static void
 mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
@@ -885,7 +900,7 @@ mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi,
 }
 
 void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
-                          struct sk_buff *skb, struct mt76_wcid *wcid,
+                          struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
                           struct ieee80211_key_conf *key, bool beacon)
 {
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -944,7 +959,12 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
 
        txwi[3] = cpu_to_le32(val);
        txwi[4] = 0;
-       txwi[5] = 0;
+
+       val = FIELD_PREP(MT_TXD5_PID, pid);
+       if (pid >= MT_PACKET_ID_FIRST)
+               val |= MT_TXD5_TX_STATUS_HOST;
+       txwi[5] = cpu_to_le32(val);
+
        txwi[6] = 0;
        txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;
 
@@ -984,11 +1004,11 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
        struct ieee80211_key_conf *key = info->control.hw_key;
        struct ieee80211_vif *vif = info->control.vif;
-       struct mt76_tx_cb *cb = mt76_tx_skb_cb(tx_info->skb);
        struct mt76_txwi_cache *t;
        struct mt7915_txp *txp;
        int id, i, nbuf = tx_info->nbuf - 1;
        u8 *txwi = (u8 *)txwi_ptr;
+       int pid;
 
        if (unlikely(tx_info->skb->len <= ETH_HLEN))
                return -EINVAL;
@@ -996,10 +1016,10 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
        if (!wcid)
                wcid = &dev->mt76.global_wcid;
 
-       mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
-                             false);
+       pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
 
-       cb->wcid = wcid->idx;
+       mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key,
+                             false);
 
        txp = (struct mt7915_txp *)(txwi + MT_TXD_SIZE);
        for (i = 0; i < nbuf; i++) {
@@ -1071,54 +1091,7 @@ mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
 }
 
 static void
-mt7915_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb,
-                         struct ieee80211_sta *sta, u8 stat,
-                         struct list_head *free_list)
-{
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_tx_status status = {
-               .sta = sta,
-               .info = info,
-               .skb = skb,
-               .free_list = free_list,
-       };
-       struct ieee80211_hw *hw;
-
-       if (sta) {
-               struct mt7915_sta *msta;
-
-               msta = (struct mt7915_sta *)sta->drv_priv;
-               status.rate = &msta->stats.tx_rate;
-       }
-
-#ifdef CONFIG_NL80211_TESTMODE
-       if (mt76_is_testmode_skb(mdev, skb, &hw)) {
-               struct mt7915_phy *phy = mt7915_hw_phy(hw);
-               struct ieee80211_vif *vif = phy->monitor_vif;
-               struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
-
-               mt76_tx_complete_skb(mdev, mvif->sta.wcid.idx, skb);
-               return;
-       }
-#endif
-
-       hw = mt76_tx_status_get_hw(mdev, skb);
-
-       if (info->flags & IEEE80211_TX_CTL_AMPDU)
-               info->flags |= IEEE80211_TX_STAT_AMPDU;
-
-       if (stat)
-               ieee80211_tx_info_clear_status(info);
-
-       if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
-               info->flags |= IEEE80211_TX_STAT_ACK;
-
-       info->status.tx_time = 0;
-       ieee80211_tx_status_ext(hw, &status);
-}
-
-void mt7915_txp_skb_unmap(struct mt76_dev *dev,
-                         struct mt76_txwi_cache *t)
+mt7915_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
 {
        struct mt7915_txp *txp;
        int i;
@@ -1129,7 +1102,39 @@ void mt7915_txp_skb_unmap(struct mt76_dev *dev,
                                 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
 }
 
-void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
+static void
+mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
+                struct ieee80211_sta *sta, struct list_head *free_list)
+{
+       struct mt76_dev *mdev = &dev->mt76;
+       struct mt76_wcid *wcid;
+       __le32 *txwi;
+       u16 wcid_idx;
+
+       mt7915_txp_skb_unmap(mdev, t);
+       if (!t->skb)
+               goto out;
+
+       txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
+       if (sta) {
+               wcid = (struct mt76_wcid *)sta->drv_priv;
+               wcid_idx = wcid->idx;
+
+               if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+                       mt7915_tx_check_aggr(sta, txwi);
+       } else {
+               wcid_idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1]));
+       }
+
+       __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
+
+out:
+       t->skb = NULL;
+       mt76_put_txwi(mdev, t);
+}
+
+static void
+mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
 {
        struct mt7915_tx_free *free = (struct mt7915_tx_free *)skb->data;
        struct mt76_dev *mdev = &dev->mt76;
@@ -1194,28 +1199,7 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
                if (!txwi)
                        continue;
 
-               mt7915_txp_skb_unmap(mdev, txwi);
-               if (txwi->skb) {
-                       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txwi->skb);
-                       void *txwi_ptr = mt76_get_txwi_ptr(mdev, txwi);
-
-                       if (likely(txwi->skb->protocol != cpu_to_be16(ETH_P_PAE)))
-                               mt7915_tx_check_aggr(sta, txwi_ptr);
-
-                       if (sta && !info->tx_time_est) {
-                               struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
-                               int pending;
-
-                               pending = atomic_dec_return(&wcid->non_aql_packets);
-                               if (pending < 0)
-                                       atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
-                       }
-
-                       mt7915_tx_complete_status(mdev, txwi->skb, sta, stat, &free_list);
-                       txwi->skb = NULL;
-               }
-
-               mt76_put_txwi(mdev, txwi);
+               mt7915_txwi_free(dev, txwi, sta, &free_list);
        }
 
        mt7915_mac_sta_poll(dev);
@@ -1233,6 +1217,120 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
        }
 }
 
+static bool
+mt7915_mac_add_txs_skb(struct mt7915_dev *dev, struct mt76_wcid *wcid, int pid,
+                      __le32 *txs_data)
+{
+       struct mt76_dev *mdev = &dev->mt76;
+       struct ieee80211_tx_info *info;
+       struct sk_buff_head list;
+       struct sk_buff *skb;
+
+       mt76_tx_status_lock(mdev, &list);
+       skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
+       if (!skb)
+               goto out;
+
+       info = IEEE80211_SKB_CB(skb);
+       if (!(txs_data[0] & le32_to_cpu(MT_TXS0_ACK_ERROR_MASK)))
+               info->flags |= IEEE80211_TX_STAT_ACK;
+
+       info->status.ampdu_len = 1;
+       info->status.ampdu_ack_len = !!(info->flags &
+                                       IEEE80211_TX_STAT_ACK);
+
+       info->status.rates[0].idx = -1;
+       mt76_tx_status_skb_done(mdev, skb, &list);
+
+out:
+       mt76_tx_status_unlock(mdev, &list);
+
+       return !!skb;
+}
+
+static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
+{
+       struct mt7915_sta *msta = NULL;
+       struct mt76_wcid *wcid;
+       __le32 *txs_data = data;
+       u16 wcidx;
+       u32 txs;
+       u8 pid;
+
+       txs = le32_to_cpu(txs_data[0]);
+       if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1)
+               return;
+
+       txs = le32_to_cpu(txs_data[2]);
+       wcidx = FIELD_GET(MT_TXS2_WCID, txs);
+
+       txs = le32_to_cpu(txs_data[3]);
+       pid = FIELD_GET(MT_TXS3_PID, txs);
+
+       if (pid < MT_PACKET_ID_FIRST)
+               return;
+
+       if (wcidx >= MT7915_WTBL_SIZE)
+               return;
+
+       rcu_read_lock();
+
+       wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
+       if (!wcid)
+               goto out;
+
+       mt7915_mac_add_txs_skb(dev, wcid, pid, txs_data);
+
+       if (!wcid->sta)
+               goto out;
+
+       msta = container_of(wcid, struct mt7915_sta, wcid);
+       spin_lock_bh(&dev->sta_poll_lock);
+       if (list_empty(&msta->poll_list))
+               list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+       spin_unlock_bh(&dev->sta_poll_lock);
+
+out:
+       rcu_read_unlock();
+}
+
+void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+                        struct sk_buff *skb)
+{
+       struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+       __le32 *rxd = (__le32 *)skb->data;
+       __le32 *end = (__le32 *)&skb->data[skb->len];
+       enum rx_pkt_type type;
+
+       type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+
+       switch (type) {
+       case PKT_TYPE_TXRX_NOTIFY:
+               mt7915_mac_tx_free(dev, skb);
+               break;
+       case PKT_TYPE_RX_EVENT:
+               mt7915_mcu_rx_event(dev, skb);
+               break;
+       case PKT_TYPE_TXRXV:
+               mt7915_mac_fill_rx_vector(dev, skb);
+               break;
+       case PKT_TYPE_TXS:
+               for (rxd += 2; rxd + 8 <= end; rxd += 8)
+                       mt7915_mac_add_txs(dev, rxd);
+               dev_kfree_skb(skb);
+               break;
+       case PKT_TYPE_NORMAL:
+               if (!mt7915_mac_fill_rx(dev, skb)) {
+                       mt76_rx(&dev->mt76, q, skb);
+                       return;
+               }
+               fallthrough;
+       default:
+               dev_kfree_skb(skb);
+               break;
+       }
+}
+
 void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
 {
        struct mt7915_dev *dev;
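Tx-status events now arrive through the common rx path: each TXS record is
eight little-endian dwords, and the loop above skips the two-dword event header
before stepping through the records. A flat-buffer sketch of that walk
(handle_txs is a stand-in for mt7915_mac_add_txs):

    #include <stdint.h>
    #include <stdio.h>

    static void handle_txs(const uint32_t *rec)
    {
        (void)rec; /* decode the 8-dword record here */
    }

    static unsigned int walk(const uint32_t *rxd, const uint32_t *end)
    {
        unsigned int n = 0;

        for (rxd += 2; rxd + 8 <= end; rxd += 8, n++)
            handle_txs(rxd);
        return n;
    }

    int main(void)
    {
        uint32_t buf[2 + 8 * 3] = { 0 };

        printf("%u records\n", walk(buf, buf + 2 + 8 * 3)); /* 3 */
        return 0;
    }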
@@ -1254,15 +1352,8 @@ void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
                e->skb = t ? t->skb : NULL;
        }
 
-       if (e->skb) {
-               struct mt76_tx_cb *cb = mt76_tx_skb_cb(e->skb);
-               struct mt76_wcid *wcid;
-
-               wcid = rcu_dereference(dev->mt76.wcid[cb->wcid]);
-
-               mt7915_tx_complete_status(mdev, e->skb, wcid_to_sta(wcid), 0,
-                                         NULL);
-       }
+       if (e->skb)
+               mt76_tx_complete_skb(mdev, e->wcid, e->skb);
 }
 
 void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
@@ -1296,14 +1387,10 @@ void mt7915_mac_reset_counters(struct mt7915_phy *phy)
        memset(&dev->mt76.aggr_stats[i], 0, sizeof(dev->mt76.aggr_stats) / 2);
 
        /* reset airtime counters */
-       mt76_rr(dev, MT_MIB_SDR9(ext_phy));
-       mt76_rr(dev, MT_MIB_SDR36(ext_phy));
-       mt76_rr(dev, MT_MIB_SDR37(ext_phy));
-
-       mt76_set(dev, MT_WF_RMAC_MIB_TIME0(ext_phy),
-                MT_WF_RMAC_MIB_RXTIME_CLR);
        mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(ext_phy),
                 MT_WF_RMAC_MIB_RXTIME_CLR);
+
+       mt7915_mcu_get_chan_mib_info(phy, true);
 }
 
 void mt7915_mac_set_timing(struct mt7915_phy *phy)
@@ -1397,53 +1484,24 @@ mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
        return sum / n;
 }
 
-static void
-mt7915_phy_update_channel(struct mt76_phy *mphy, int idx)
+void mt7915_update_channel(struct mt76_phy *mphy)
 {
-       struct mt7915_dev *dev = container_of(mphy->dev, struct mt7915_dev, mt76);
        struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv;
-       struct mt76_channel_state *state;
-       u64 busy_time, tx_time, rx_time, obss_time;
+       struct mt76_channel_state *state = mphy->chan_state;
+       bool ext_phy = phy != &phy->dev->phy;
        int nf;
 
-       busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
-                                  MT_MIB_SDR9_BUSY_MASK);
-       tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
-                                MT_MIB_SDR36_TXTIME_MASK);
-       rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
-                                MT_MIB_SDR37_RXTIME_MASK);
-       obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx),
-                                  MT_MIB_OBSSTIME_MASK);
+       mt7915_mcu_get_chan_mib_info(phy, false);
 
-       nf = mt7915_phy_get_nf(phy, idx);
+       nf = mt7915_phy_get_nf(phy, ext_phy);
        if (!phy->noise)
                phy->noise = nf << 4;
        else if (nf)
                phy->noise += nf - (phy->noise >> 4);
 
-       state = mphy->chan_state;
-       state->cc_busy += busy_time;
-       state->cc_tx += tx_time;
-       state->cc_rx += rx_time + obss_time;
-       state->cc_bss_rx += rx_time;
        state->noise = -(phy->noise >> 4);
 }
 
-void mt7915_update_channel(struct mt76_dev *mdev)
-{
-       struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
-
-       mt7915_phy_update_channel(&mdev->phy, 0);
-       if (mdev->phy2)
-               mt7915_phy_update_channel(mdev->phy2, 1);
-
-       /* reset obss airtime */
-       mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
-       if (mdev->phy2)
-               mt76_set(dev, MT_WF_RMAC_MIB_TIME0(1),
-                        MT_WF_RMAC_MIB_RXTIME_CLR);
-}
-
 static bool
 mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state)
 {
@@ -1530,14 +1588,18 @@ mt7915_dma_reset(struct mt7915_dev *dev)
        mt76_set(dev, MT_WFDMA0_GLO_CFG,
                 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
        mt76_set(dev, MT_WFDMA1_GLO_CFG,
-                MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+                MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+                MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+                MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
        if (dev->hif2) {
                mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
                        (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
                         MT_WFDMA0_GLO_CFG_RX_DMA_EN));
                mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
                        (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
-                        MT_WFDMA1_GLO_CFG_RX_DMA_EN));
+                        MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+                        MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+                        MT_WFDMA1_GLO_CFG_OMIT_RX_INFO));
        }
 }
 
@@ -1548,14 +1610,7 @@ void mt7915_tx_token_put(struct mt7915_dev *dev)
 
        spin_lock_bh(&dev->mt76.token_lock);
        idr_for_each_entry(&dev->mt76.token, txwi, id) {
-               mt7915_txp_skb_unmap(&dev->mt76, txwi);
-               if (txwi->skb) {
-                       struct ieee80211_hw *hw;
-
-                       hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
-                       ieee80211_free_txskb(hw, txwi->skb);
-               }
-               mt76_put_txwi(&dev->mt76, txwi);
+               mt7915_txwi_free(dev, txwi, NULL, NULL);
                dev->mt76.token_count--;
        }
        spin_unlock_bh(&dev->mt76.token_lock);
@@ -1588,11 +1643,6 @@ void mt7915_mac_reset_work(struct work_struct *work)
                set_bit(MT76_RESET, &phy2->mt76->state);
                cancel_delayed_work_sync(&phy2->mt76->mac_work);
        }
-       /* lock/unlock all queues to ensure that no tx is pending */
-       mt76_txq_schedule_all(&dev->mphy);
-       if (ext_phy)
-               mt76_txq_schedule_all(ext_phy);
-
        mt76_worker_disable(&dev->mt76.tx_worker);
        napi_disable(&dev->mt76.napi[0]);
        napi_disable(&dev->mt76.napi[1]);
@@ -1618,10 +1668,6 @@ void mt7915_mac_reset_work(struct work_struct *work)
        if (phy2)
                clear_bit(MT76_RESET, &phy2->mt76->state);
 
-       mt76_worker_enable(&dev->mt76.tx_worker);
-       napi_enable(&dev->mt76.tx_napi);
-       napi_schedule(&dev->mt76.tx_napi);
-
        napi_enable(&dev->mt76.napi[0]);
        napi_schedule(&dev->mt76.napi[0]);
 
@@ -1630,14 +1676,20 @@ void mt7915_mac_reset_work(struct work_struct *work)
 
        napi_enable(&dev->mt76.napi[2]);
        napi_schedule(&dev->mt76.napi[2]);
+       tasklet_schedule(&dev->irq_tasklet);
+
+       mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
+       mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
+
+       mt76_worker_enable(&dev->mt76.tx_worker);
+
+       napi_enable(&dev->mt76.tx_napi);
+       napi_schedule(&dev->mt76.tx_napi);
 
        ieee80211_wake_queues(mt76_hw(dev));
        if (ext_phy)
                ieee80211_wake_queues(ext_phy->hw);
 
-       mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
-       mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
-
        mutex_unlock(&dev->mt76.mutex);
 
        mt7915_update_beacons(dev);
@@ -1651,7 +1703,7 @@ void mt7915_mac_reset_work(struct work_struct *work)
 }
 
 static void
-mt7915_mac_update_mib_stats(struct mt7915_phy *phy)
+mt7915_mac_update_stats(struct mt7915_phy *phy)
 {
        struct mt7915_dev *dev = phy->dev;
        struct mib_stats *mib = &phy->mib;
@@ -1733,8 +1785,10 @@ void mt7915_mac_sta_rc_work(struct work_struct *work)
 
                if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
                               IEEE80211_RC_NSS_CHANGED |
-                              IEEE80211_RC_BW_CHANGED))
+                              IEEE80211_RC_BW_CHANGED)) {
+                       mt7915_mcu_add_he(dev, vif, sta);
                        mt7915_mcu_add_rate_ctrl(dev, vif, sta);
+               }
 
                if (changed & IEEE80211_RC_SMPS_CHANGED)
                        mt7915_mcu_add_smps(dev, vif, sta);
@@ -1756,11 +1810,11 @@ void mt7915_mac_work(struct work_struct *work)
 
        mutex_lock(&mphy->dev->mutex);
 
-       mt76_update_survey(mphy->dev);
+       mt76_update_survey(mphy);
        if (++mphy->mac_work_count == 5) {
                mphy->mac_work_count = 0;
 
-               mt7915_mac_update_mib_stats(phy);
+               mt7915_mac_update_stats(phy);
        }
 
        if (++phy->sta_work_count == 10) {
@@ -1770,6 +1824,8 @@ void mt7915_mac_work(struct work_struct *work)
 
        mutex_unlock(&mphy->dev->mutex);
 
+       mt76_tx_status_check(mphy->dev, NULL, false);
+
        ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
                                     MT7915_WATCHDOG_TIME);
 }
index 0f929fb..eb1885f 100644 (file)
@@ -304,6 +304,62 @@ struct mt7915_tx_free {
 /* will support this field in a further revision */
 #define MT_TX_FREE_RATE                        GENMASK(13, 0)
 
+#define MT_TXS0_FIXED_RATE             BIT(31)
+#define MT_TXS0_BW                     GENMASK(30, 29)
+#define MT_TXS0_TID                    GENMASK(28, 26)
+#define MT_TXS0_AMPDU                  BIT(25)
+#define MT_TXS0_TXS_FORMAT             GENMASK(24, 23)
+#define MT_TXS0_BA_ERROR               BIT(22)
+#define MT_TXS0_PS_FLAG                        BIT(21)
+#define MT_TXS0_TXOP_TIMEOUT           BIT(20)
+#define MT_TXS0_BIP_ERROR              BIT(19)
+
+#define MT_TXS0_QUEUE_TIMEOUT          BIT(18)
+#define MT_TXS0_RTS_TIMEOUT            BIT(17)
+#define MT_TXS0_ACK_TIMEOUT            BIT(16)
+#define MT_TXS0_ACK_ERROR_MASK         GENMASK(18, 16)
+
+#define MT_TXS0_TX_STATUS_HOST         BIT(15)
+#define MT_TXS0_TX_STATUS_MCU          BIT(14)
+#define MT_TXS0_TX_RATE                        GENMASK(13, 0)
+
+#define MT_TXS1_SEQNO                  GENMASK(31, 20)
+#define MT_TXS1_RESP_RATE              GENMASK(19, 16)
+#define MT_TXS1_RXV_SEQNO              GENMASK(15, 8)
+#define MT_TXS1_TX_POWER_DBM           GENMASK(7, 0)
+
+#define MT_TXS2_BF_STATUS              GENMASK(31, 30)
+#define MT_TXS2_LAST_TX_RATE           GENMASK(29, 27)
+#define MT_TXS2_SHARED_ANTENNA         BIT(26)
+#define MT_TXS2_WCID                   GENMASK(25, 16)
+#define MT_TXS2_TX_DELAY               GENMASK(15, 0)
+
+#define MT_TXS3_PID                    GENMASK(31, 24)
+#define MT_TXS3_ANT_ID                 GENMASK(23, 0)
+
+#define MT_TXS4_TIMESTAMP              GENMASK(31, 0)
+
+#define MT_TXS5_F0_FINAL_MPDU          BIT(31)
+#define MT_TXS5_F0_QOS                 BIT(30)
+#define MT_TXS5_F0_TX_COUNT            GENMASK(29, 25)
+#define MT_TXS5_F0_FRONT_TIME          GENMASK(24, 0)
+#define MT_TXS5_F1_MPDU_TX_COUNT       GENMASK(31, 24)
+#define MT_TXS5_F1_MPDU_TX_BYTES       GENMASK(23, 0)
+
+#define MT_TXS6_F0_NOISE_3             GENMASK(31, 24)
+#define MT_TXS6_F0_NOISE_2             GENMASK(23, 16)
+#define MT_TXS6_F0_NOISE_1             GENMASK(15, 8)
+#define MT_TXS6_F0_NOISE_0             GENMASK(7, 0)
+#define MT_TXS6_F1_MPDU_FAIL_COUNT     GENMASK(31, 24)
+#define MT_TXS6_F1_MPDU_FAIL_BYTES     GENMASK(23, 0)
+
+#define MT_TXS7_F0_RCPI_3              GENMASK(31, 24)
+#define MT_TXS7_F0_RCPI_2              GENMASK(23, 16)
+#define MT_TXS7_F0_RCPI_1              GENMASK(15, 8)
+#define MT_TXS7_F0_RCPI_0              GENMASK(7, 0)
+#define MT_TXS7_F1_MPDU_RETRY_COUNT    GENMASK(31, 24)
+#define MT_TXS7_F1_MPDU_RETRY_BYTES    GENMASK(23, 0)
+
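
The TXS words above are unpacked with FIELD_GET()-style mask arithmetic. A minimal user-space sketch of that extraction, where GENMASK32 and GET_FIELD are stand-ins for the kernel's GENMASK/FIELD_GET and the sample value is made up:

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))
/* divide by the mask's lowest set bit to shift the field down */
#define GET_FIELD(mask, val) (((val) & (mask)) / ((mask) & -(mask)))

int main(void)
{
        uint32_t txs0 = 0x80001234;     /* made-up TXS DWORD 0 */

        /* MT_TXS0_TX_RATE is GENMASK(13, 0), MT_TXS0_FIXED_RATE is BIT(31) */
        printf("tx_rate=0x%x fixed=%u\n",
               GET_FIELD(GENMASK32(13, 0), txs0),
               GET_FIELD(1u << 31, txs0));
        return 0;
}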
 struct mt7915_dfs_pulse {
        u32 max_width;          /* us */
        int max_pwr;            /* dbm */
index e5bd687..c25f8da 100644 (file)
@@ -139,12 +139,6 @@ static int get_omac_idx(enum nl80211_iftype type, u64 mask)
                if (type != NL80211_IFTYPE_STATION)
                        break;
 
-               /* next, try to find a free repeater entry for the sta */
-               i = get_free_idx(mask >> REPEATER_BSSID_START, 0,
-                                REPEATER_BSSID_MAX - REPEATER_BSSID_START);
-               if (i)
-                       return i + 32 - 1;
-
                i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
                if (i)
                        return i - 1;
@@ -172,6 +166,22 @@ static int get_omac_idx(enum nl80211_iftype type, u64 mask)
        return -1;
 }
 
+static void mt7915_init_bitrate_mask(struct ieee80211_vif *vif)
+{
+       struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(mvif->bitrate_mask.control); i++) {
+               mvif->bitrate_mask.control[i].legacy = GENMASK(31, 0);
+               memset(mvif->bitrate_mask.control[i].ht_mcs, GENMASK(7, 0),
+                      sizeof(mvif->bitrate_mask.control[i].ht_mcs));
+               memset(mvif->bitrate_mask.control[i].vht_mcs, GENMASK(15, 0),
+                      sizeof(mvif->bitrate_mask.control[i].vht_mcs));
+               memset(mvif->bitrate_mask.control[i].he_mcs, GENMASK(15, 0),
+                      sizeof(mvif->bitrate_mask.control[i].he_mcs));
+       }
+}
+
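
A side note on the memset() calls above: memset() truncates its fill value to one byte, so GENMASK(15, 0) (0xffff) really becomes the byte 0xff, and each 16-bit mask entry only ends up as 0xffff because every one of its bytes is filled with 0xff. A quick standalone check of that behaviour:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint16_t mcs[4];

        memset(mcs, 0xffff, sizeof(mcs));       /* fill value truncated to 0xff */
        printf("mcs[0] = 0x%04x\n", mcs[0]);    /* 0xffff, byte by byte */
        return 0;
}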
 static int mt7915_add_interface(struct ieee80211_hw *hw,
                                struct ieee80211_vif *vif)
 {
@@ -241,6 +251,8 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
                vif->offload_flags = 0;
        vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR;
 
+       mt7915_init_bitrate_mask(vif);
+
 out:
        mutex_unlock(&dev->mt76.mutex);
 
@@ -798,7 +810,8 @@ mt7915_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 
        n = mvif->omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->omac_idx;
        /* TSF software read */
-       mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE);
+       mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+                MT_LPON_TCR_SW_READ);
        tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(band));
        tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(band));
 
@@ -827,7 +840,34 @@ mt7915_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
        mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
        /* TSF software overwrite */
-       mt76_set(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_WRITE);
+       mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+                MT_LPON_TCR_SW_WRITE);
+
+       mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7915_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                 s64 timestamp)
+{
+       struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+       struct mt7915_dev *dev = mt7915_hw_dev(hw);
+       struct mt7915_phy *phy = mt7915_hw_phy(hw);
+       bool band = phy != &dev->phy;
+       union {
+               u64 t64;
+               u32 t32[2];
+       } tsf = { .t64 = timestamp, };
+       u16 n;
+
+       mutex_lock(&dev->mt76.mutex);
+
+       n = mvif->omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->omac_idx;
+       mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
+       mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
+       /* TSF software adjust */
+       mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+                MT_LPON_TCR_SW_ADJUST);
 
        mutex_unlock(&dev->mt76.mutex);
 }
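
The mt76_set() to mt76_rmw() change in the three TSF paths matters because MT_LPON_TCR_SW_MODE names a multi-bit mode field: OR-ing in a new mode can leave a previously programmed mode bit set, while a read-modify-write clears the field before writing READ/WRITE/ADJUST. A generic sketch of the difference in plain C (not the mt76 accessors):

#include <stdint.h>
#include <stdio.h>

/* clear the bits in 'mask', then set the bits in 'val' */
static uint32_t rmw(uint32_t reg, uint32_t mask, uint32_t val)
{
        return (reg & ~mask) | val;
}

int main(void)
{
        uint32_t mode_mask = 3u;        /* pretend 2-bit mode field */
        uint32_t reg = 1u;              /* mode currently 01 */

        printf("set: 0x%x\n", reg | 2u);                /* 0x3: two modes mixed */
        printf("rmw: 0x%x\n", rmw(reg, mode_mask, 2u)); /* 0x2: clean switch */
        return 0;
}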
@@ -911,17 +951,15 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
        sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
 }
 
-static void
-mt7915_sta_rc_update(struct ieee80211_hw *hw,
-                    struct ieee80211_vif *vif,
-                    struct ieee80211_sta *sta,
-                    u32 changed)
+static void mt7915_sta_rc_work(void *data, struct ieee80211_sta *sta)
 {
-       struct mt7915_dev *dev = mt7915_hw_dev(hw);
        struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+       struct mt7915_dev *dev = msta->vif->phy->dev;
+       struct ieee80211_hw *hw = msta->vif->phy->mt76->hw;
+       u32 *changed = data;
 
        spin_lock_bh(&dev->sta_poll_lock);
-       msta->stats.changed |= changed;
+       msta->stats.changed |= *changed;
        if (list_empty(&msta->rc_list))
                list_add_tail(&msta->rc_list, &dev->sta_rc_list);
        spin_unlock_bh(&dev->sta_poll_lock);
@@ -929,6 +967,39 @@ mt7915_sta_rc_update(struct ieee80211_hw *hw,
        ieee80211_queue_work(hw, &dev->rc_work);
 }
 
+static void mt7915_sta_rc_update(struct ieee80211_hw *hw,
+                                struct ieee80211_vif *vif,
+                                struct ieee80211_sta *sta,
+                                u32 changed)
+{
+       mt7915_sta_rc_work(&changed, sta);
+}
+
+static int
+mt7915_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                       const struct cfg80211_bitrate_mask *mask)
+{
+       struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+       enum nl80211_band band = mvif->phy->mt76->chandef.chan->band;
+       u32 changed;
+
+       if (mask->control[band].gi == NL80211_TXRATE_FORCE_LGI)
+               return -EINVAL;
+
+       changed = IEEE80211_RC_SUPP_RATES_CHANGED;
+       mvif->bitrate_mask = *mask;
+
+       /* Update firmware rate control so that a boundary is added on top
+        * of the rate table, limiting rate selection for each peer. For
+        * example, "set bitrates vht-mcs-5 1:9" means nss = 1, mcs = 0~9.
+        * This only applies to data frames; mgmt, mcast and bcast frames
+        * still use legacy rates as-is.
+        */
+       ieee80211_iterate_stations_atomic(hw, mt7915_sta_rc_work, &changed);
+
+       return 0;
+}
+
 static void mt7915_sta_set_4addr(struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif,
                                 struct ieee80211_sta *sta,
@@ -987,9 +1058,11 @@ const struct ieee80211_ops mt7915_ops = {
        .get_stats = mt7915_get_stats,
        .get_tsf = mt7915_get_tsf,
        .set_tsf = mt7915_set_tsf,
+       .offset_tsf = mt7915_offset_tsf,
        .get_survey = mt76_get_survey,
        .get_antenna = mt76_get_antenna,
        .set_antenna = mt7915_set_antenna,
+       .set_bitrate_mask = mt7915_set_bitrate_mask,
        .set_coverage_class = mt7915_set_coverage_class,
        .sta_statistics = mt7915_sta_statistics,
        .sta_set_4addr = mt7915_sta_set_4addr,
index b3f14ff..863aa18 100644 (file)
@@ -88,28 +88,28 @@ struct mt7915_fw_region {
 #define HE_PHY(p, c)                   u8_get_bits(c, IEEE80211_HE_PHY_##p)
 #define HE_MAC(m, c)                   u8_get_bits(c, IEEE80211_HE_MAC_##m)
 
-static enum mt7915_cipher_type
+static enum mcu_cipher_type
 mt7915_mcu_get_cipher(int cipher)
 {
        switch (cipher) {
        case WLAN_CIPHER_SUITE_WEP40:
-               return MT_CIPHER_WEP40;
+               return MCU_CIPHER_WEP40;
        case WLAN_CIPHER_SUITE_WEP104:
-               return MT_CIPHER_WEP104;
+               return MCU_CIPHER_WEP104;
        case WLAN_CIPHER_SUITE_TKIP:
-               return MT_CIPHER_TKIP;
+               return MCU_CIPHER_TKIP;
        case WLAN_CIPHER_SUITE_AES_CMAC:
-               return MT_CIPHER_BIP_CMAC_128;
+               return MCU_CIPHER_BIP_CMAC_128;
        case WLAN_CIPHER_SUITE_CCMP:
-               return MT_CIPHER_AES_CCMP;
+               return MCU_CIPHER_AES_CCMP;
        case WLAN_CIPHER_SUITE_CCMP_256:
-               return MT_CIPHER_CCMP_256;
+               return MCU_CIPHER_CCMP_256;
        case WLAN_CIPHER_SUITE_GCMP:
-               return MT_CIPHER_GCMP;
+               return MCU_CIPHER_GCMP;
        case WLAN_CIPHER_SUITE_GCMP_256:
-               return MT_CIPHER_GCMP_256;
+               return MCU_CIPHER_GCMP_256;
        case WLAN_CIPHER_SUITE_SMS4:
-               return MT_CIPHER_WAPI;
+               return MCU_CIPHER_WAPI;
        default:
                return MT_CIPHER_NONE;
        }
@@ -147,10 +147,10 @@ mt7915_get_he_phy_cap(struct mt7915_phy *phy, struct ieee80211_vif *vif)
 }
 
 static u8
-mt7915_get_phy_mode(struct mt76_phy *mphy, struct ieee80211_vif *vif,
-                   struct ieee80211_sta *sta)
+mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta)
 {
-       enum nl80211_band band = mphy->chandef.chan->band;
+       struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+       enum nl80211_band band = mvif->phy->mt76->chandef.chan->band;
        struct ieee80211_sta_ht_cap *ht_cap;
        struct ieee80211_sta_vht_cap *vht_cap;
        const struct ieee80211_sta_he_cap *he_cap;
@@ -163,7 +163,7 @@ mt7915_get_phy_mode(struct mt76_phy *mphy, struct ieee80211_vif *vif,
        } else {
                struct ieee80211_supported_band *sband;
 
-               sband = mphy->hw->wiphy->bands[band];
+               sband = mvif->phy->mt76->hw->wiphy->bands[band];
 
                ht_cap = &sband->ht_cap;
                vht_cap = &sband->vht_cap;
@@ -209,6 +209,112 @@ mt7915_mcu_get_sta_nss(u16 mcs_map)
        return nss - 1;
 }
 
+static void
+mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
+                         const u16 *mask)
+{
+       struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+       struct cfg80211_chan_def *chandef = &msta->vif->phy->mt76->chandef;
+       int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
+       u16 mcs_map;
+
+       switch (chandef->width) {
+       case NL80211_CHAN_WIDTH_80P80:
+               mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80p80);
+               break;
+       case NL80211_CHAN_WIDTH_160:
+               mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160);
+               break;
+       default:
+               mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80);
+               break;
+       }
+
+       for (nss = 0; nss < max_nss; nss++) {
+               int mcs;
+
+               switch ((mcs_map >> (2 * nss)) & 0x3) {
+               case IEEE80211_HE_MCS_SUPPORT_0_11:
+                       mcs = GENMASK(11, 0);
+                       break;
+               case IEEE80211_HE_MCS_SUPPORT_0_9:
+                       mcs = GENMASK(9, 0);
+                       break;
+               case IEEE80211_HE_MCS_SUPPORT_0_7:
+                       mcs = GENMASK(7, 0);
+                       break;
+               default:
+                       mcs = 0;
+               }
+
+               mcs = mcs ? fls(mcs & mask[nss]) - 1 : -1;
+
+               switch (mcs) {
+               case 0 ... 7:
+                       mcs = IEEE80211_HE_MCS_SUPPORT_0_7;
+                       break;
+               case 8 ... 9:
+                       mcs = IEEE80211_HE_MCS_SUPPORT_0_9;
+                       break;
+               case 10 ... 11:
+                       mcs = IEEE80211_HE_MCS_SUPPORT_0_11;
+                       break;
+               default:
+                       mcs = IEEE80211_HE_MCS_NOT_SUPPORTED;
+                       break;
+               }
+               mcs_map &= ~(0x3 << (nss * 2));
+               mcs_map |= mcs << (nss * 2);
+
+               /* only support 2ss on 160MHz */
+               if (nss > 1 && (sta->bandwidth == IEEE80211_STA_RX_BW_160))
+                       break;
+       }
+
+       *he_mcs = cpu_to_le16(mcs_map);
+}
+
+static void
+mt7915_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
+                          const u16 *mask)
+{
+       u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
+       int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
+       u16 mcs;
+
+       for (nss = 0; nss < max_nss; nss++, mcs_map >>= 2) {
+               switch (mcs_map & 0x3) {
+               case IEEE80211_VHT_MCS_SUPPORT_0_9:
+                       mcs = GENMASK(9, 0);
+                       break;
+               case IEEE80211_VHT_MCS_SUPPORT_0_8:
+                       mcs = GENMASK(8, 0);
+                       break;
+               case IEEE80211_VHT_MCS_SUPPORT_0_7:
+                       mcs = GENMASK(7, 0);
+                       break;
+               default:
+                       mcs = 0;
+               }
+
+               vht_mcs[nss] = cpu_to_le16(mcs & mask[nss]);
+
+               /* only support 2ss on 160MHz */
+               if (nss > 1 && (sta->bandwidth == IEEE80211_STA_RX_BW_160))
+                       break;
+       }
+}
+
+static void
+mt7915_mcu_set_sta_ht_mcs(struct ieee80211_sta *sta, u8 *ht_mcs,
+                         const u8 *mask)
+{
+       int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
+
+       for (nss = 0; nss < max_nss; nss++)
+               ht_mcs[nss] = sta->ht_cap.mcs.rx_mask[nss] & mask[nss];
+}
+
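
A standalone sketch of the per-NSS clamping done by mt7915_mcu_set_sta_he_mcs() above: each 2-bit map entry is expanded to a bitmask of usable MCS indices, intersected with the user mask, then collapsed back to the highest tier still supported. The HE_MCS_* constants below only mirror the mac80211 IEEE80211_HE_MCS_SUPPORT_* values for illustration:

#include <stdint.h>
#include <stdio.h>

enum { HE_MCS_0_7, HE_MCS_0_9, HE_MCS_0_11, HE_MCS_NOT_SUPPORTED };

static unsigned int clamp_nss(unsigned int cap, uint16_t user_mask)
{
        unsigned int bits;
        int top;

        switch (cap) {
        case HE_MCS_0_11: bits = (1u << 12) - 1; break; /* MCS 0..11 */
        case HE_MCS_0_9:  bits = (1u << 10) - 1; break; /* MCS 0..9 */
        case HE_MCS_0_7:  bits = (1u << 8) - 1;  break; /* MCS 0..7 */
        default:          bits = 0;
        }

        bits &= user_mask;                      /* intersect with the mask */
        if (!bits)
                return HE_MCS_NOT_SUPPORTED;

        top = 31 - __builtin_clz(bits);         /* highest usable MCS (fls - 1) */
        if (top >= 10)
                return HE_MCS_0_11;
        return top >= 8 ? HE_MCS_0_9 : HE_MCS_0_7;
}

int main(void)
{
        /* peer advertises MCS 0..11, user limits it to MCS 0..9 */
        printf("%u\n", clamp_nss(HE_MCS_0_11, (1u << 10) - 1)); /* 1 */
        return 0;
}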
 static int
 mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
                          struct sk_buff *skb, int seq)
@@ -350,6 +456,24 @@ mt7915_mcu_rx_csa_notify(struct mt7915_dev *dev, struct sk_buff *skb)
 }
 
 static void
+mt7915_mcu_rx_thermal_notify(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+       struct mt76_phy *mphy = &dev->mt76.phy;
+       struct mt7915_mcu_thermal_notify *t;
+       struct mt7915_phy *phy;
+
+       t = (struct mt7915_mcu_thermal_notify *)skb->data;
+       if (t->ctrl.ctrl_id != THERMAL_PROTECT_ENABLE)
+               return;
+
+       if (t->ctrl.band_idx && dev->mt76.phy2)
+               mphy = dev->mt76.phy2;
+
+       phy = (struct mt7915_phy *)mphy->priv;
+       phy->throttle_state = t->ctrl.duty.duty_cycle;
+}
+
+static void
 mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
 {
        struct mt76_phy *mphy = &dev->mt76.phy;
@@ -469,6 +593,7 @@ mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb)
        u16 attempts = le16_to_cpu(ra->attempts);
        u16 curr = le16_to_cpu(ra->curr_rate);
        u16 wcidx = le16_to_cpu(ra->wlan_idx);
+       struct ieee80211_tx_status status = {};
        struct mt76_phy *mphy = &dev->mphy;
        struct mt7915_sta_stats *stats;
        struct mt7915_sta *msta;
@@ -500,6 +625,13 @@ mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb)
 
                stats->per = 1000 * (attempts - success) / attempts;
        }
+
+       status.sta = wcid_to_sta(wcid);
+       if (!status.sta)
+               return;
+
+       status.rate = &stats->tx_rate;
+       ieee80211_tx_status_ext(mphy->hw, &status);
 }
 
 static void
@@ -531,6 +663,9 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
        struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
 
        switch (rxd->ext_eid) {
+       case MCU_EXT_EVENT_THERMAL_PROTECT:
+               mt7915_mcu_rx_thermal_notify(dev, skb);
+               break;
        case MCU_EXT_EVENT_RDD_REPORT:
                mt7915_mcu_rx_radar_detected(dev, skb);
                break;
@@ -733,7 +868,7 @@ mt7915_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
                memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
                bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
                bss->dtim_period = vif->bss_conf.dtim_period;
-               bss->phy_mode = mt7915_get_phy_mode(phy->mt76, vif, NULL);
+               bss->phy_mode = mt7915_get_phy_mode(vif, NULL);
        } else {
                memcpy(bss->bssid, phy->mt76->macaddr, ETH_ALEN);
        }
@@ -1072,14 +1207,14 @@ mt7915_mcu_sta_key_tlv(struct mt7915_sta *msta, struct sk_buff *skb,
                sec_key = &sec->key[0];
                sec_key->cipher_len = sizeof(*sec_key);
 
-               if (cipher == MT_CIPHER_BIP_CMAC_128) {
-                       sec_key->cipher_id = MT_CIPHER_AES_CCMP;
+               if (cipher == MCU_CIPHER_BIP_CMAC_128) {
+                       sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
                        sec_key->key_id = bip->keyidx;
                        sec_key->key_len = 16;
                        memcpy(sec_key->key, bip->key, 16);
 
                        sec_key = &sec->key[1];
-                       sec_key->cipher_id = MT_CIPHER_BIP_CMAC_128;
+                       sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
                        sec_key->cipher_len = sizeof(*sec_key);
                        sec_key->key_len = 16;
                        memcpy(sec_key->key, key->key, 16);
@@ -1091,14 +1226,14 @@ mt7915_mcu_sta_key_tlv(struct mt7915_sta *msta, struct sk_buff *skb,
                        sec_key->key_len = key->keylen;
                        memcpy(sec_key->key, key->key, key->keylen);
 
-                       if (cipher == MT_CIPHER_TKIP) {
+                       if (cipher == MCU_CIPHER_TKIP) {
                                /* Rx/Tx MIC keys are swapped */
                                memcpy(sec_key->key + 16, key->key + 24, 8);
                                memcpy(sec_key->key + 24, key->key + 16, 8);
                        }
 
                        /* store key_conf for BIP batch update */
-                       if (cipher == MT_CIPHER_AES_CCMP) {
+                       if (cipher == MCU_CIPHER_AES_CCMP) {
                                memcpy(bip->key, key->key, key->keylen);
                                bip->keyidx = key->keyidx;
                        }
@@ -1336,8 +1471,11 @@ mt7915_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
 static void
 mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
 {
+       struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
        struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
        struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
+       enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
+       const u16 *mcs_mask = msta->vif->bitrate_mask.control[band].he_mcs;
        struct sta_rec_he *he;
        struct tlv *tlv;
        u32 cap = 0;
@@ -1428,15 +1566,18 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
        case IEEE80211_STA_RX_BW_160:
                if (elem->phy_cap_info[0] &
                    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
-                       he->max_nss_mcs[CMD_HE_MCS_BW8080] =
-                               he_cap->he_mcs_nss_supp.rx_mcs_80p80;
+                       mt7915_mcu_set_sta_he_mcs(sta,
+                                                 &he->max_nss_mcs[CMD_HE_MCS_BW8080],
+                                                 mcs_mask);
 
-               he->max_nss_mcs[CMD_HE_MCS_BW160] =
-                               he_cap->he_mcs_nss_supp.rx_mcs_160;
+               mt7915_mcu_set_sta_he_mcs(sta,
+                                         &he->max_nss_mcs[CMD_HE_MCS_BW160],
+                                         mcs_mask);
                fallthrough;
        default:
-               he->max_nss_mcs[CMD_HE_MCS_BW80] =
-                               he_cap->he_mcs_nss_supp.rx_mcs_80;
+               mt7915_mcu_set_sta_he_mcs(sta,
+                                         &he->max_nss_mcs[CMD_HE_MCS_BW80],
+                                         mcs_mask);
                break;
        }
 
@@ -1544,27 +1685,18 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
                HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]);
 }
 
-static int
-mt7915_mcu_add_mu(struct mt7915_dev *dev, struct ieee80211_vif *vif,
-                 struct ieee80211_sta *sta)
+static void
+mt7915_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
 {
-       struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
-       struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
-       struct sk_buff *skb;
-       int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_muru);
-
-       if (!sta->vht_cap.vht_supported && !sta->he_cap.has_he)
-               return 0;
-
-       skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
-       if (IS_ERR(skb))
-               return PTR_ERR(skb);
+       struct sta_rec_vht *vht;
+       struct tlv *tlv;
 
-       /* starec muru */
-       mt7915_mcu_sta_muru_tlv(skb, sta);
+       tlv = mt7915_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
 
-       return mt76_mcu_skb_send_msg(&dev->mt76, skb,
-                                    MCU_EXT_CMD(STA_REC_UPDATE), true);
+       vht = (struct sta_rec_vht *)tlv;
+       vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
+       vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
+       vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
 }
 
 static void
@@ -1616,17 +1748,6 @@ mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
                        mt7915_mcu_sta_amsdu_tlv(skb, sta);
        }
 
-       /* starec vht */
-       if (sta->vht_cap.vht_supported) {
-               struct sta_rec_vht *vht;
-
-               tlv = mt7915_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
-               vht = (struct sta_rec_vht *)tlv;
-               vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
-               vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
-               vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
-       }
-
        /* starec he */
        if (sta->he_cap.has_he)
                mt7915_mcu_sta_he_tlv(skb, sta);
@@ -2016,26 +2137,21 @@ mt7915_mcu_add_txbf(struct mt7915_dev *dev, struct ieee80211_vif *vif,
                vc = mt7915_get_he_phy_cap(phy, vif);
                ve = &vc->he_cap_elem;
 
-               ebfee = !!((HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]) ||
-                           HE_PHY(CAP4_MU_BEAMFORMER, pe->phy_cap_info[4])) &&
+               ebfee = !!(HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]) &&
                           HE_PHY(CAP4_SU_BEAMFORMEE, ve->phy_cap_info[4]));
-               ebf = !!((HE_PHY(CAP3_SU_BEAMFORMER, ve->phy_cap_info[3]) ||
-                         HE_PHY(CAP4_MU_BEAMFORMER, ve->phy_cap_info[4])) &&
+               ebf = !!(HE_PHY(CAP3_SU_BEAMFORMER, ve->phy_cap_info[3]) &&
                         HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]));
        } else if (sta->vht_cap.vht_supported) {
                struct ieee80211_sta_vht_cap *pc;
                struct ieee80211_sta_vht_cap *vc;
-               u32 cr, ce;
 
                pc = &sta->vht_cap;
                vc = &phy->mt76->sband_5g.sband.vht_cap;
-               cr = IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
-                    IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE;
-               ce = IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
-                    IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
 
-               ebfee = !!((pc->cap & cr) && (vc->cap & ce));
-               ebf = !!((vc->cap & cr) && (pc->cap & ce));
+               ebfee = !!((pc->cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) &&
+                          (vc->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
+               ebf = !!((vc->cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) &&
+                        (pc->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
        }
 
        /* must keep each tag independent */
@@ -2079,57 +2195,47 @@ static void
 mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
                             struct ieee80211_vif *vif, struct ieee80211_sta *sta)
 {
-       struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
-       struct mt76_phy *mphy = &dev->mphy;
-       enum nl80211_band band;
+       struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+       struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
+       struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask;
+       enum nl80211_band band = chandef->chan->band;
        struct sta_rec_ra *ra;
        struct tlv *tlv;
-       u32 supp_rate, n_rates, cap = sta->wme ? STA_CAP_WMM : 0;
-       u8 i, nss = sta->rx_nss, mcs = 0;
+       u32 supp_rate = sta->supp_rates[band];
+       u32 cap = sta->wme ? STA_CAP_WMM : 0;
 
        tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
        ra = (struct sta_rec_ra *)tlv;
 
-       if (msta->wcid.ext_phy && dev->mt76.phy2)
-               mphy = dev->mt76.phy2;
-
-       band = mphy->chandef.chan->band;
-       supp_rate = sta->supp_rates[band];
-       n_rates = hweight32(supp_rate);
-
        ra->valid = true;
        ra->auto_rate = true;
-       ra->phy_mode = mt7915_get_phy_mode(mphy, vif, sta);
-       ra->channel = mphy->chandef.chan->hw_value;
+       ra->phy_mode = mt7915_get_phy_mode(vif, sta);
+       ra->channel = chandef->chan->hw_value;
        ra->bw = sta->bandwidth;
-       ra->rate_len = n_rates;
        ra->phy.bw = sta->bandwidth;
 
-       if (n_rates) {
+       if (supp_rate) {
+               supp_rate &= mask->control[band].legacy;
+               ra->rate_len = hweight32(supp_rate);
+
                if (band == NL80211_BAND_2GHZ) {
                        ra->supp_mode = MODE_CCK;
                        ra->supp_cck_rate = supp_rate & GENMASK(3, 0);
-                       ra->phy.type = MT_PHY_TYPE_CCK;
 
-                       if (n_rates > 4) {
+                       if (ra->rate_len > 4) {
                                ra->supp_mode |= MODE_OFDM;
                                ra->supp_ofdm_rate = supp_rate >> 4;
-                               ra->phy.type = MT_PHY_TYPE_OFDM;
                        }
                } else {
                        ra->supp_mode = MODE_OFDM;
                        ra->supp_ofdm_rate = supp_rate;
-                       ra->phy.type = MT_PHY_TYPE_OFDM;
                }
        }
 
        if (sta->ht_cap.ht_supported) {
-               for (i = 0; i < nss; i++)
-                       ra->ht_mcs[i] = sta->ht_cap.mcs.rx_mask[i];
+               const u8 *mcs_mask = mask->control[band].ht_mcs;
 
-               ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs;
                ra->supp_mode |= MODE_HT;
-               mcs = hweight32(le32_to_cpu(ra->supp_ht_mcs)) - 1;
                ra->af = sta->ht_cap.ampdu_factor;
                ra->ht_gf = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
 
@@ -2144,13 +2250,16 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
                        cap |= STA_CAP_RX_STBC;
                if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
                        cap |= STA_CAP_LDPC;
+
+               mt7915_mcu_set_sta_ht_mcs(sta, ra->ht_mcs, mcs_mask);
+               ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs;
        }
 
        if (sta->vht_cap.vht_supported) {
-               u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
-               u16 vht_mcs;
-               u8 af, mcs_prev;
+               const u16 *mcs_mask = mask->control[band].vht_mcs;
+               u8 af;
 
+               ra->supp_mode |= MODE_VHT;
                af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
                               sta->vht_cap.cap);
                ra->af = max_t(u8, ra->af, af);
@@ -2167,33 +2276,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
                if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
                        cap |= STA_CAP_VHT_LDPC;
 
-               ra->supp_mode |= MODE_VHT;
-               for (mcs = 0, i = 0; i < nss; i++, mcs_map >>= 2) {
-                       switch (mcs_map & 0x3) {
-                       case IEEE80211_VHT_MCS_SUPPORT_0_9:
-                               vht_mcs = GENMASK(9, 0);
-                               break;
-                       case IEEE80211_VHT_MCS_SUPPORT_0_8:
-                               vht_mcs = GENMASK(8, 0);
-                               break;
-                       case IEEE80211_VHT_MCS_SUPPORT_0_7:
-                               vht_mcs = GENMASK(7, 0);
-                               break;
-                       default:
-                               vht_mcs = 0;
-                       }
-
-                       ra->supp_vht_mcs[i] = cpu_to_le16(vht_mcs);
-
-                       mcs_prev = hweight16(vht_mcs) - 1;
-                       if (mcs_prev > mcs)
-                               mcs = mcs_prev;
-
-                       /* only support 2ss on 160MHz */
-                       if (i > 1 && (ra->bw == CMD_CBW_160MHZ ||
-                                     ra->bw == CMD_CBW_8080MHZ))
-                               break;
-               }
+               mt7915_mcu_set_sta_vht_mcs(sta, ra->supp_vht_mcs, mcs_mask);
        }
 
        if (sta->he_cap.has_he) {
@@ -2201,28 +2284,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
                cap |= STA_CAP_HE;
        }
 
-       ra->sta_status = cpu_to_le32(cap);
-
-       switch (BIT(fls(ra->supp_mode) - 1)) {
-       case MODE_VHT:
-               ra->phy.type = MT_PHY_TYPE_VHT;
-               ra->phy.mcs = mcs;
-               ra->phy.nss = nss;
-               ra->phy.stbc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC);
-               ra->phy.ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
-               ra->phy.sgi =
-                       !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
-               break;
-       case MODE_HT:
-               ra->phy.type = MT_PHY_TYPE_HT;
-               ra->phy.mcs = mcs;
-               ra->phy.ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING;
-               ra->phy.stbc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC);
-               ra->phy.sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
-               break;
-       default:
-               break;
-       }
+       ra->sta_cap = cpu_to_le32(cap);
 }
 
 int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
@@ -2243,6 +2305,87 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
                                     MCU_EXT_CMD(STA_REC_UPDATE), true);
 }
 
+int mt7915_mcu_add_he(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+                     struct ieee80211_sta *sta)
+{
+       struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+       struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+       struct sk_buff *skb;
+       int len;
+
+       if (!sta->he_cap.has_he)
+               return 0;
+
+       len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_he);
+
+       skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       mt7915_mcu_sta_he_tlv(skb, sta);
+
+       return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+                                    MCU_EXT_CMD(STA_REC_UPDATE), true);
+}
+
+static int
+mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+                    struct ieee80211_sta *sta)
+{
+#define MT_STA_BSS_GROUP               1
+       struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+       struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+       struct {
+               __le32 action;
+               u8 wlan_idx_lo;
+               u8 status;
+               u8 wlan_idx_hi;
+               u8 rsv0[5];
+               __le32 val;
+               u8 rsv1[8];
+       } __packed req = {
+               .action = cpu_to_le32(MT_STA_BSS_GROUP),
+               .wlan_idx_lo = to_wcid_lo(msta->wcid.idx),
+               .wlan_idx_hi = to_wcid_hi(msta->wcid.idx),
+               .val = cpu_to_le32(mvif->idx % 16),
+       };
+
+       return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_DRR_CTRL), &req,
+                                sizeof(req), true);
+}
+
+static int
+mt7915_mcu_add_mu(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+                 struct ieee80211_sta *sta)
+{
+       struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+       struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+       struct sk_buff *skb;
+       int ret;
+
+       if (!sta->vht_cap.vht_supported && !sta->he_cap.has_he)
+               return 0;
+
+       ret = mt7915_mcu_add_group(dev, vif, sta);
+       if (ret)
+               return ret;
+
+       skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
+                                      MT7915_STA_UPDATE_MAX_SIZE);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       /* wait until TxBF and MU are ready to update starec vht */
+
+       /* starec muru */
+       mt7915_mcu_sta_muru_tlv(skb, sta);
+       /* starec vht */
+       mt7915_mcu_sta_vht_tlv(skb, sta);
+
+       return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+                                    MCU_EXT_CMD(STA_REC_UPDATE), true);
+}
+
 int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif,
                           struct ieee80211_sta *sta, bool enable)
 {
@@ -2253,17 +2396,14 @@ int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif,
 
        /* must keep the order */
        ret = mt7915_mcu_add_txbf(dev, vif, sta, enable);
-       if (ret)
+       if (ret || !enable)
                return ret;
 
        ret = mt7915_mcu_add_mu(dev, vif, sta);
        if (ret)
                return ret;
 
-       if (enable)
-               return mt7915_mcu_add_rate_ctrl(dev, vif, sta);
-
-       return 0;
+       return mt7915_mcu_add_rate_ctrl(dev, vif, sta);
 }
 
 int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
@@ -2432,7 +2572,7 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct sk_buff *rskb,
                cont->csa_ofs = cpu_to_le16(offs->cntdwn_counter_offs[0] - 4);
 
        buf = (u8 *)tlv + sizeof(*cont);
-       mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL,
+       mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, 0, NULL,
                              true);
        memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
 }
@@ -3307,7 +3447,8 @@ int mt7915_mcu_set_eeprom(struct mt7915_dev *dev)
 int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
 {
        struct mt7915_mcu_eeprom_info req = {
-               .addr = cpu_to_le32(round_down(offset, 16)),
+               .addr = cpu_to_le32(round_down(offset,
+                                   MT7915_EEPROM_BLOCK_SIZE)),
        };
        struct mt7915_mcu_eeprom_info *res;
        struct sk_buff *skb;
@@ -3321,7 +3462,7 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
 
        res = (struct mt7915_mcu_eeprom_info *)skb->data;
        buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr);
-       memcpy(buf, res->data, 16);
+       memcpy(buf, res->data, MT7915_EEPROM_BLOCK_SIZE);
        dev_kfree_skb(skb);
 
        return 0;
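
The round_down() change keeps EEPROM reads block-aligned: any offset inside a 16-byte block fetches the block that contains it. A one-liner illustration of the power-of-two rounding involved:

#include <stdio.h>

#define ROUND_DOWN(x, a) ((x) & ~((a) - 1))     /* 'a' must be a power of two */

int main(void)
{
        /* offset 0x123 lies in the block starting at 0x120 */
        printf("0x%x -> 0x%x\n", 0x123, ROUND_DOWN(0x123, 16));
        return 0;
}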
@@ -3440,8 +3581,9 @@ int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy)
 {
        struct mt7915_dev *dev = phy->dev;
        struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
-       u16 total = 2, idx, center_freq = chandef->center_freq1;
+       u16 total = 2, center_freq = chandef->center_freq1;
        u8 *cal = dev->cal, *eep = dev->mt76.eeprom.data;
+       int idx;
 
        if (!(eep[MT_EE_DO_PRE_CAL] & MT_EE_WIFI_CAL_DPD))
                return 0;
@@ -3469,22 +3611,128 @@ int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy)
        return 0;
 }
 
-int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index)
+int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
+{
+       /* strict order */
+       static const enum mt7915_chan_mib_offs offs[] = {
+               MIB_BUSY_TIME, MIB_TX_TIME, MIB_RX_TIME, MIB_OBSS_AIRTIME
+       };
+       struct mt76_channel_state *state = phy->mt76->chan_state;
+       struct mt76_channel_state *state_ts = &phy->state_ts;
+       struct mt7915_dev *dev = phy->dev;
+       struct mt7915_mcu_mib *res, req[4];
+       struct sk_buff *skb;
+       int i, ret;
+
+       for (i = 0; i < 4; i++) {
+               req[i].band = cpu_to_le32(phy != &dev->phy);
+               req[i].offs = cpu_to_le32(offs[i]);
+       }
+
+       ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(GET_MIB_INFO),
+                                       req, sizeof(req), true, &skb);
+       if (ret)
+               return ret;
+
+       res = (struct mt7915_mcu_mib *)(skb->data + 20);
+
+       if (chan_switch)
+               goto out;
+
+#define __res_u64(s) le64_to_cpu(res[s].data)
+       state->cc_busy += __res_u64(0) - state_ts->cc_busy;
+       state->cc_tx += __res_u64(1) - state_ts->cc_tx;
+       state->cc_bss_rx += __res_u64(2) - state_ts->cc_bss_rx;
+       state->cc_rx += __res_u64(2) + __res_u64(3) - state_ts->cc_rx;
+
+out:
+       state_ts->cc_busy = __res_u64(0);
+       state_ts->cc_tx = __res_u64(1);
+       state_ts->cc_bss_rx = __res_u64(2);
+       state_ts->cc_rx = __res_u64(2) + __res_u64(3);
+#undef __res_u64
+
+       dev_kfree_skb(skb);
+
+       return 0;
+}
+
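
The firmware MIB counters fetched above are free-running, so the driver adds the delta since the snapshot kept in state_ts and re-snapshots on every call; on a channel switch only the snapshot is refreshed. The accumulation pattern, sketched standalone:

#include <stdint.h>
#include <stdio.h>

struct cc { uint64_t busy, last; };

static void update(struct cc *s, uint64_t hw_busy, int chan_switch)
{
        if (!chan_switch)
                s->busy += hw_busy - s->last;   /* time since last poll */
        s->last = hw_busy;                      /* re-snapshot either way */
}

int main(void)
{
        struct cc s = { 0, 0 };

        update(&s, 1000, 0);
        update(&s, 1500, 0);
        update(&s, 200, 1);     /* counters restarted on channel switch */
        update(&s, 450, 0);
        printf("busy=%llu\n", (unsigned long long)s.busy);      /* 1750 */
        return 0;
}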
+int mt7915_mcu_get_temperature(struct mt7915_phy *phy)
 {
+       struct mt7915_dev *dev = phy->dev;
        struct {
                u8 ctrl_id;
                u8 action;
-               u8 band;
+               u8 dbdc_idx;
                u8 rsv[5];
        } req = {
                .ctrl_id = THERMAL_SENSOR_TEMP_QUERY,
-               .action = index,
+               .dbdc_idx = phy != &dev->phy,
        };
 
        return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_CTRL), &req,
                                 sizeof(req), true);
 }
 
+int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
+{
+       struct mt7915_dev *dev = phy->dev;
+       struct {
+               struct mt7915_mcu_thermal_ctrl ctrl;
+
+               __le32 trigger_temp;
+               __le32 restore_temp;
+               __le16 sustain_time;
+               u8 rsv[2];
+       } __packed req = {
+               .ctrl = {
+                       .band_idx = phy != &dev->phy,
+               },
+       };
+       int level;
+
+#define TRIGGER_TEMPERATURE    122
+#define RESTORE_TEMPERATURE    116
+#define SUSTAIN_PERIOD         10
+
+       if (!state) {
+               req.ctrl.ctrl_id = THERMAL_PROTECT_DISABLE;
+               goto out;
+       }
+
+       /* set duty cycle and level */
+       for (level = 0; level < 4; level++) {
+               int ret;
+
+               req.ctrl.ctrl_id = THERMAL_PROTECT_DUTY_CONFIG;
+               req.ctrl.duty.duty_level = level;
+               req.ctrl.duty.duty_cycle = state;
+               state = state * 4 / 5;
+
+               ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
+                                       &req, sizeof(req.ctrl), false);
+               if (ret)
+                       return ret;
+       }
+
+       /* Fixed values are used for throttling for now; implementing a
+        * thermal zone for dynamic trip points would be better in the
+        * long run.
+        */
+
+       /* set high-temperature trigger threshold */
+       req.ctrl.ctrl_id = THERMAL_PROTECT_ENABLE;
+       req.trigger_temp = cpu_to_le32(TRIGGER_TEMPERATURE);
+       req.restore_temp = cpu_to_le32(RESTORE_TEMPERATURE);
+       req.sustain_time = cpu_to_le16(SUSTAIN_PERIOD);
+
+out:
+       req.ctrl.type.protect_type = 1;
+       req.ctrl.type.trigger_type = 1;
+
+       return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
+                                &req, sizeof(req), false);
+}
+
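
Each successive throttle level above keeps 4/5 of the previous duty cycle, so a requested state of 100 programs levels 0-3 as 100, 80, 64 and 51. The decay, reproduced standalone:

#include <stdio.h>

int main(void)
{
        int state = 100, level;

        for (level = 0; level < 4; level++) {
                printf("level %d: duty %d\n", level, state);
                state = state * 4 / 5;
        }
        return 0;
}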
 int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx)
 {
        struct {
@@ -3505,7 +3753,6 @@ int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx)
 
 int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
 {
-#define MT7915_SKU_RATE_NUM            161
        struct mt7915_dev *dev = phy->dev;
        struct mt76_phy *mphy = phy->mt76;
        struct ieee80211_hw *hw = mphy->hw;
@@ -3555,6 +3802,39 @@ int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
                                 sizeof(req), true);
 }
 
+int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len)
+{
+#define RATE_POWER_INFO        2
+       struct mt7915_dev *dev = phy->dev;
+       struct {
+               u8 format_id;
+               u8 category;
+               u8 band;
+               u8 _rsv;
+       } __packed req = {
+               .format_id = 7,
+               .category = RATE_POWER_INFO,
+               .band = phy != &dev->phy,
+       };
+       s8 res[MT7915_SKU_RATE_NUM][2];
+       struct sk_buff *skb;
+       int ret, i;
+
+       ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+                                       MCU_EXT_CMD(TX_POWER_FEATURE_CTRL),
+                                       &req, sizeof(req), true, &skb);
+       if (ret)
+               return ret;
+
+       memcpy(res, skb->data + 4, sizeof(res));
+       for (i = 0; i < len; i++)
+               txpower[i] = res[i][req.band];
+
+       dev_kfree_skb(skb);
+
+       return 0;
+}
+
 int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode,
                              u8 en)
 {
@@ -3613,57 +3893,50 @@ int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band)
                                 &req, sizeof(req), false);
 }
 
-int mt7915_mcu_set_txbf_module(struct mt7915_dev *dev)
-{
-#define MT_BF_MODULE_UPDATE               25
-       struct {
-               u8 action;
-               u8 bf_num;
-               u8 bf_bitmap;
-               u8 bf_sel[8];
-               u8 rsv[8];
-       } __packed req = {
-               .action = MT_BF_MODULE_UPDATE,
-               .bf_num = 2,
-               .bf_bitmap = GENMASK(1, 0),
-       };
-
-       return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
-                                sizeof(req), true);
-}
-
-int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev)
+int mt7915_mcu_set_txbf(struct mt7915_dev *dev, u8 action)
 {
-#define MT_BF_TYPE_UPDATE              20
        struct {
                u8 action;
-               bool ebf;
-               bool ibf;
-               u8 rsv;
+               union {
+                       struct {
+                               u8 snd_mode;
+                               u8 sta_num;
+                               u8 rsv;
+                               u8 wlan_idx[4];
+                               __le32 snd_period;      /* ms */
+                       } __packed snd;
+                       struct {
+                               bool ebf;
+                               bool ibf;
+                               u8 rsv;
+                       } __packed type;
+                       struct {
+                               u8 bf_num;
+                               u8 bf_bitmap;
+                               u8 bf_sel[8];
+                               u8 rsv[5];
+                       } __packed mod;
+               };
        } __packed req = {
-               .action = MT_BF_TYPE_UPDATE,
-               .ebf = true,
-               .ibf = dev->ibf,
+               .action = action,
        };
 
-       return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
-                                sizeof(req), true);
-}
-
-int mt7915_mcu_set_txbf_sounding(struct mt7915_dev *dev)
-{
-#define MT_BF_PROCESSING               4
-       struct {
-               u8 action;
-               u8 snd_mode;
-               u8 sta_num;
-               u8 rsv;
-               u8 wlan_idx[4];
-               __le32 snd_period;      /* ms */
-       } __packed req = {
-               .action = true,
-               .snd_mode = MT_BF_PROCESSING,
-       };
+#define MT_BF_PROCESSING       4
+       switch (action) {
+       case MT_BF_SOUNDING_ON:
+               req.snd.snd_mode = MT_BF_PROCESSING;
+               break;
+       case MT_BF_TYPE_UPDATE:
+               req.type.ebf = true;
+               req.type.ibf = dev->ibf;
+               break;
+       case MT_BF_MODULE_UPDATE:
+               req.mod.bf_num = 2;
+               req.mod.bf_bitmap = GENMASK(1, 0);
+               break;
+       default:
+               return -EINVAL;
+       }
 
        return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
                                 sizeof(req), true);
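
Folding the three TXBF helpers into one works because the per-action payloads overlay the same request bytes. The tagged-union shape, reduced to a standalone sketch with field names abbreviated from the request above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct txbf_req {
        uint8_t action;
        union {
                struct { uint8_t snd_mode, sta_num; } snd;
                struct { uint8_t ebf, ibf; } type;
                struct { uint8_t bf_num, bf_bitmap; } mod;
        } u;
};

int main(void)
{
        struct txbf_req req;

        memset(&req, 0, sizeof(req));
        req.action = 20;                /* MT_BF_TYPE_UPDATE */
        req.u.type.ebf = 1;

        printf("action=%u ebf=%u\n", req.action, req.u.type.ebf);
        return 0;
}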
index 42582a6..edd3ba3 100644 (file)
@@ -68,6 +68,29 @@ struct mt7915_mcu_rxd {
        u8 s2d_index;
 };
 
+struct mt7915_mcu_thermal_ctrl {
+       u8 ctrl_id;
+       u8 band_idx;
+       union {
+               struct {
+                       u8 protect_type; /* 1: duty admit, 2: radio off */
+                       u8 trigger_type; /* 0: low, 1: high */
+               } __packed type;
+               struct {
+                       u8 duty_level;  /* level 0~3 */
+                       u8 duty_cycle;
+               } __packed duty;
+       };
+} __packed;
+
+struct mt7915_mcu_thermal_notify {
+       struct mt7915_mcu_rxd rxd;
+
+       struct mt7915_mcu_thermal_ctrl ctrl;
+       __le32 temperature;
+       u8 rsv[8];
+} __packed;
+
 struct mt7915_mcu_csa_notify {
        struct mt7915_mcu_rxd rxd;
 
@@ -193,6 +216,19 @@ struct mt7915_mcu_phy_rx_info {
 #define MT_RA_RATE_DCM_EN              BIT(4)
 #define MT_RA_RATE_BW                  GENMASK(14, 13)
 
+struct mt7915_mcu_mib {
+       __le32 band;
+       __le32 offs;
+       __le64 data;
+} __packed;
+
+enum mt7915_chan_mib_offs {
+       MIB_BUSY_TIME = 14,
+       MIB_TX_TIME = 81,
+       MIB_RX_TIME,
+       MIB_OBSS_AIRTIME = 86
+};
+
 struct edca {
        u8 queue;
        u8 set;
@@ -262,6 +298,7 @@ enum {
        MCU_EXT_CMD_FW_LOG_2_HOST = 0x13,
        MCU_EXT_CMD_TXBF_ACTION = 0x1e,
        MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
+       MCU_EXT_CMD_THERMAL_PROT = 0x23,
        MCU_EXT_CMD_STA_REC_UPDATE = 0x25,
        MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26,
        MCU_EXT_CMD_EDCA_UPDATE = 0x27,
@@ -277,6 +314,7 @@ enum {
        MCU_EXT_CMD_MUAR_UPDATE = 0x48,
        MCU_EXT_CMD_SET_RX_PATH = 0x4e,
        MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58,
+       MCU_EXT_CMD_GET_MIB_INFO = 0x5a,
        MCU_EXT_CMD_MWDS_SUPPORT = 0x80,
        MCU_EXT_CMD_SET_SER_TRIGGER = 0x81,
        MCU_EXT_CMD_SCS_CTRL = 0x82,
@@ -919,7 +957,7 @@ struct sta_rec_ra {
        u8 op_vht_rx_nss;
        u8 op_vht_rx_nss_type;
 
-       __le32 sta_status;
+       __le32 sta_cap;
 
        struct ra_phy phy;
 } __packed;
@@ -1034,18 +1072,17 @@ enum {
        STA_REC_MAX_NUM
 };
 
-enum mt7915_cipher_type {
-       MT_CIPHER_NONE,
-       MT_CIPHER_WEP40,
-       MT_CIPHER_WEP104,
-       MT_CIPHER_WEP128,
-       MT_CIPHER_TKIP,
-       MT_CIPHER_AES_CCMP,
-       MT_CIPHER_CCMP_256,
-       MT_CIPHER_GCMP,
-       MT_CIPHER_GCMP_256,
-       MT_CIPHER_WAPI,
-       MT_CIPHER_BIP_CMAC_128,
+enum mcu_cipher_type {
+       MCU_CIPHER_WEP40 = 1,
+       MCU_CIPHER_WEP104,
+       MCU_CIPHER_WEP128,
+       MCU_CIPHER_TKIP,
+       MCU_CIPHER_AES_CCMP,
+       MCU_CIPHER_CCMP_256,
+       MCU_CIPHER_GCMP,
+       MCU_CIPHER_GCMP_256,
+       MCU_CIPHER_WAPI,
+       MCU_CIPHER_BIP_CMAC_128,
 };
 
 enum {
@@ -1067,10 +1104,27 @@ enum {
 };
 
 enum {
+       THERMAL_PROTECT_PARAMETER_CTRL,
+       THERMAL_PROTECT_BASIC_INFO,
+       THERMAL_PROTECT_ENABLE,
+       THERMAL_PROTECT_DISABLE,
+       THERMAL_PROTECT_DUTY_CONFIG,
+       THERMAL_PROTECT_MECH_INFO,
+       THERMAL_PROTECT_DUTY_INFO,
+       THERMAL_PROTECT_STATE_ACT,
+};
+
+enum {
        MT_EBF = BIT(0),        /* explicit beamforming */
        MT_IBF = BIT(1)         /* implicit beamforming */
 };
 
+enum {
+       MT_BF_SOUNDING_ON = 1,
+       MT_BF_TYPE_UPDATE = 20,
+       MT_BF_MODULE_UPDATE = 25
+};
+
 #define MT7915_WTBL_UPDATE_MAX_SIZE    (sizeof(struct wtbl_req_hdr) +  \
                                         sizeof(struct wtbl_generic) +  \
                                         sizeof(struct wtbl_rx) +       \
index 4ea8972..3f613fa 100644 (file)
@@ -9,7 +9,7 @@
 #include "../mt76.h"
 #include "regs.h"
 
-#define MT7915_MAX_INTERFACES          32
+#define MT7915_MAX_INTERFACES          19
 #define MT7915_MAX_WMM_SETS            4
 #define MT7915_WTBL_SIZE               288
 #define MT7915_WTBL_RESERVED           (MT7915_WTBL_SIZE - 1)
@@ -31,6 +31,7 @@
 #define MT7915_ROM_PATCH               "mediatek/mt7915_rom_patch.bin"
 
 #define MT7915_EEPROM_SIZE             3584
+#define MT7915_EEPROM_BLOCK_SIZE       16
 #define MT7915_TOKEN_SIZE              8192
 
 #define MT7915_CFEND_RATE_DEFAULT      0x49    /* OFDM 24M */
 #define MT7915_5G_RATE_DEFAULT         0x4b    /* OFDM 6M */
 #define MT7915_2G_RATE_DEFAULT         0x0     /* CCK 1M */
 
+#define MT7915_THERMAL_THROTTLE_MAX    100
+
+#define MT7915_SKU_RATE_NUM            161
+
 struct mt7915_vif;
 struct mt7915_sta;
 struct mt7915_dfs_pulse;
@@ -100,6 +105,7 @@ struct mt7915_vif {
        struct mt7915_phy *phy;
 
        struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+       struct cfg80211_bitrate_mask bitrate_mask;
 };
 
 struct mib_stats {
@@ -126,6 +132,9 @@ struct mt7915_phy {
 
        struct ieee80211_vif *monitor_vif;
 
+       struct thermal_cooling_device *cdev;
+       u8 throttle_state;
+
        u32 rxfilter;
        u64 omac_mask;
 
@@ -141,6 +150,7 @@ struct mt7915_phy {
        u32 ampdu_ref;
 
        struct mib_stats mib;
+       struct mt76_channel_state state_ts;
        struct list_head stats_list;
 
        u8 sta_work_count;
@@ -169,6 +179,7 @@ struct mt7915_dev {
        struct mt7915_hif *hif2;
 
        const struct mt76_bus_ops *bus_ops;
+       struct tasklet_struct irq_tasklet;
        struct mt7915_phy phy;
 
        u16 chainmask;
@@ -322,6 +333,8 @@ int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif,
                             bool enable);
 int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
                             struct ieee80211_sta *sta);
+int mt7915_mcu_add_he(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+                     struct ieee80211_sta *sta);
 int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta);
 int mt7915_set_channel(struct mt7915_phy *phy);
@@ -342,9 +355,8 @@ int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val);
 int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter);
 int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable);
 int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy);
-int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev);
-int mt7915_mcu_set_txbf_module(struct mt7915_dev *dev);
-int mt7915_mcu_set_txbf_sounding(struct mt7915_dev *dev);
+int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len);
+int mt7915_mcu_set_txbf(struct mt7915_dev *dev, u8 action);
 int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val);
 int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev,
                            const struct mt7915_dfs_pulse *pulse);
@@ -352,7 +364,9 @@ int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index,
                            const struct mt7915_dfs_pattern *pattern);
 int mt7915_mcu_apply_group_cal(struct mt7915_dev *dev);
 int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy);
-int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index);
+int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch);
+int mt7915_mcu_get_temperature(struct mt7915_phy *phy);
+int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state);
 int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx);
 int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
                           struct ieee80211_sta *sta, struct rate_info *rate);
@@ -374,9 +388,11 @@ void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev, bool write_reg,
 static inline void mt7915_irq_enable(struct mt7915_dev *dev, u32 mask)
 {
        if (dev->hif2)
-               mt7915_dual_hif_set_irq_mask(dev, true, 0, mask);
+               mt7915_dual_hif_set_irq_mask(dev, false, 0, mask);
        else
-               mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
+               mt76_set_irq_mask(&dev->mt76, 0, 0, mask);
+
+       tasklet_schedule(&dev->irq_tasklet);
 }
 
 static inline void mt7915_irq_disable(struct mt7915_dev *dev, u32 mask)
@@ -392,12 +408,9 @@ void mt7915_mac_reset_counters(struct mt7915_phy *phy);
 void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy);
 void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy);
 void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
-                          struct sk_buff *skb, struct mt76_wcid *wcid,
+                          struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
                           struct ieee80211_key_conf *key, bool beacon);
 void mt7915_mac_set_timing(struct mt7915_phy *phy);
-int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb);
-void mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb);
-void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb);
 int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta);
 void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
@@ -417,13 +430,11 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
                         struct sk_buff *skb);
 void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
 void mt7915_stats_work(struct work_struct *work);
-void mt7915_txp_skb_unmap(struct mt76_dev *dev,
-                         struct mt76_txwi_cache *txwi);
 int mt76_dfs_start_rdd(struct mt7915_dev *dev, bool force);
 int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy);
 void mt7915_set_stream_he_caps(struct mt7915_phy *phy);
 void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy);
-void mt7915_update_channel(struct mt76_dev *mdev);
+void mt7915_update_channel(struct mt76_phy *mphy);
 int mt7915_init_debugfs(struct mt7915_dev *dev);
 #ifdef CONFIG_MAC80211_DEBUGFS
 void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
index 643f171..340b364 100644 (file)
@@ -94,11 +94,15 @@ mt7915_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
 }
 
 /* TODO: support 2/4/6/8 MSI-X vectors */
-static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
+static void mt7915_irq_tasklet(struct tasklet_struct *t)
 {
-       struct mt7915_dev *dev = dev_instance;
+       struct mt7915_dev *dev = from_tasklet(dev, t, irq_tasklet);
        u32 intr, intr1, mask;
 
+       mt76_wr(dev, MT_INT_MASK_CSR, 0);
+       if (dev->hif2)
+               mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+
        intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
        intr &= dev->mt76.mmio.irqmask;
        mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
@@ -111,9 +115,6 @@ static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
                intr |= intr1;
        }
 
-       if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
-               return IRQ_NONE;
-
        trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
 
        mask = intr & MT_INT_RX_DONE_ALL;
@@ -150,6 +151,20 @@ static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
                        wake_up(&dev->reset_wait);
                }
        }
+}
+
+static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
+{
+       struct mt7915_dev *dev = dev_instance;
+
+       mt76_wr(dev, MT_INT_MASK_CSR, 0);
+       if (dev->hif2)
+               mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+
+       if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+               return IRQ_NONE;
+
+       tasklet_schedule(&dev->irq_tasklet);
 
        return IRQ_HANDLED;
 }
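
The hunks above split IRQ servicing into a top half and a bottom half: the hard handler only masks the chip and schedules a tasklet, and the tasklet performs the source read and dispatch before the mask is re-armed via mt7915_irq_enable(). A minimal sketch of the pattern, with hypothetical register offsets standing in for the mt7915 CSRs:

    #include <linux/interrupt.h>
    #include <linux/io.h>

    #define DEMO_INT_MASK   0x00    /* hypothetical mask register */
    #define DEMO_INT_SOURCE 0x04    /* hypothetical source register */

    struct demo_dev {
            void __iomem *regs;
            struct tasklet_struct irq_tasklet;
    };

    /* top half: silence the device and defer all work */
    static irqreturn_t demo_irq_handler(int irq, void *data)
    {
            struct demo_dev *dev = data;

            writel(0, dev->regs + DEMO_INT_MASK);
            tasklet_schedule(&dev->irq_tasklet);

            return IRQ_HANDLED;
    }

    /* bottom half: read and ack the source, dispatch, re-arm */
    static void demo_irq_tasklet(struct tasklet_struct *t)
    {
            struct demo_dev *dev = from_tasklet(dev, t, irq_tasklet);
            u32 intr = readl(dev->regs + DEMO_INT_SOURCE);

            writel(intr, dev->regs + DEMO_INT_SOURCE);  /* ack */
            /* ... hand off to NAPI / MCU handlers here ... */
            writel(~0U, dev->regs + DEMO_INT_MASK);     /* re-arm */
    }

tasklet_setup(&dev->irq_tasklet, demo_irq_tasklet) wires the two together, which is exactly what the probe hunk below adds.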
@@ -240,6 +255,8 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
        if (ret)
                return ret;
 
+       mt76_pci_disable_aspm(pdev);
+
        if (id->device == 0x7916)
                return mt7915_pci_hif2_probe(pdev);
 
@@ -250,10 +267,18 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
 
        dev = container_of(mdev, struct mt7915_dev, mt76);
 
+       ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+       if (ret < 0)
+               goto free;
+
        ret = mt7915_mmio_init(mdev, pcim_iomap_table(pdev)[0], pdev->irq);
        if (ret)
                goto error;
 
+       tasklet_setup(&dev->irq_tasklet, mt7915_irq_tasklet);
+
+       mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
        /* master switch of PCIe interrupt enable */

        mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
 
@@ -266,10 +291,14 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
 
        ret = mt7915_register_device(dev);
        if (ret)
-               goto error;
+               goto free_irq;
 
        return 0;
+free_irq:
+       devm_free_irq(mdev->dev, pdev->irq, dev);
 error:
+       pci_free_irq_vectors(pdev);
+free:
        mt76_free_device(&dev->mt76);
 
        return ret;
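
The probe rework above also extends the usual goto unwind ladder: each newly acquired resource (the IRQ vectors, then the requested IRQ) gets its own label, and errors release everything in reverse order of acquisition. The shape of the pattern, reduced to placeholder steps:

    /* placeholders for the real steps: pci_alloc_irq_vectors(),
     * mt7915_mmio_init()/IRQ setup, mt7915_register_device() */
    static int step_a(void);
    static int step_b(void);
    static int step_c(void);
    static void undo_step_b(void);
    static void undo_step_a(void);

    static int demo_probe(void)
    {
            int ret;

            ret = step_a();
            if (ret)
                    return ret;

            ret = step_b();
            if (ret)
                    goto undo_a;

            ret = step_c();
            if (ret)
                    goto undo_b;

            return 0;

    undo_b:
            undo_step_b();
    undo_a:
            undo_step_a();
            return ret;
    }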
index efe0f29..a213b5c 100644 (file)
 #define MT_TMAC_CTCR0_INS_DDLMT_EN             BIT(17)
 #define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN   BIT(18)
 
-#define MT_TMAC_FP0R0(_band)           MT_WF_TMAC(_band, 0x020)
-#define MT_TMAC_FP0R15(_band)          MT_WF_TMAC(_band, 0x080)
-#define MT_TMAC_FP0R18(_band)          MT_WF_TMAC(_band, 0x270)
-#define MT_TMAC_FP_MASK                        GENMASK(7, 0)
-
 #define MT_TMAC_TFCR0(_band)           MT_WF_TMAC(_band, 0x1e0)
 
 #define MT_WF_DMA_BASE(_band)          ((_band) ? 0xa1e00 : 0x21e00)
 #define MT_ETBF_TX_FB_CPL              GENMASK(31, 16)
 #define MT_ETBF_TX_FB_TRI              GENMASK(15, 0)
 
+#define MT_ETBF_RX_FB_CONT(_band)      MT_WF_ETBF(_band, 0x068)
+#define MT_ETBF_RX_FB_BW               GENMASK(7, 6)
+#define MT_ETBF_RX_FB_NC               GENMASK(5, 3)
+#define MT_ETBF_RX_FB_NR               GENMASK(2, 0)
+
 #define MT_ETBF_TX_APP_CNT(_band)      MT_WF_ETBF(_band, 0x0f0)
 #define MT_ETBF_TX_IBF_CNT             GENMASK(31, 16)
 #define MT_ETBF_TX_EBF_CNT             GENMASK(15, 0)
 #define MT_LPON_TCR(_band, n)          MT_WF_LPON(_band, 0x0a8 + (n) * 4)
 #define MT_LPON_TCR_SW_MODE            GENMASK(1, 0)
 #define MT_LPON_TCR_SW_WRITE           BIT(0)
+#define MT_LPON_TCR_SW_ADJUST          BIT(1)
+#define MT_LPON_TCR_SW_READ            GENMASK(1, 0)
 
 /* MIB: band 0(0x24800), band 1(0xa4800) */
 #define MT_WF_MIB_BASE(_band)          ((_band) ? 0xa4800 : 0x24800)
 #define MT_MIB_SDR3(_band)             MT_WF_MIB(_band, 0x014)
 #define MT_MIB_SDR3_FCS_ERR_MASK       GENMASK(15, 0)
 
-#define MT_MIB_SDR9(_band)             MT_WF_MIB(_band, 0x02c)
-#define MT_MIB_SDR9_BUSY_MASK          GENMASK(23, 0)
-
-#define MT_MIB_SDR16(_band)            MT_WF_MIB(_band, 0x048)
-#define MT_MIB_SDR16_BUSY_MASK         GENMASK(23, 0)
-
 #define MT_MIB_SDR34(_band)            MT_WF_MIB(_band, 0x090)
 #define MT_MIB_MU_BF_TX_CNT            GENMASK(15, 0)
 
-#define MT_MIB_SDR36(_band)            MT_WF_MIB(_band, 0x098)
-#define MT_MIB_SDR36_TXTIME_MASK       GENMASK(23, 0)
-#define MT_MIB_SDR37(_band)            MT_WF_MIB(_band, 0x09c)
-#define MT_MIB_SDR37_RXTIME_MASK       GENMASK(23, 0)
-
 #define MT_MIB_DR8(_band)              MT_WF_MIB(_band, 0x0c0)
 #define MT_MIB_DR9(_band)              MT_WF_MIB(_band, 0x0c4)
 #define MT_MIB_DR11(_band)             MT_WF_MIB(_band, 0x0cc)
 #define MT_MIB_BA_MISS_COUNT_MASK      GENMASK(15, 0)
 #define MT_MIB_ACK_FAIL_COUNT_MASK     GENMASK(31, 16)
 
-#define MT_MIB_MB_SDR2(_band, n)       MT_WF_MIB(_band, 0x108 + ((n) << 4))
-#define MT_MIB_FRAME_RETRIES_COUNT_MASK        GENMASK(15, 0)
-
 #define MT_TX_AGG_CNT(_band, n)                MT_WF_MIB(_band, 0x0a8 + ((n) << 2))
 #define MT_TX_AGG_CNT2(_band, n)       MT_WF_MIB(_band, 0x164 + ((n) << 2))
 #define MT_MIB_ARNG(_band, n)          MT_WF_MIB(_band, 0x4b8 + ((n) << 2))
 #define MT_WF_RFCR1_DROP_CFEND         BIT(7)
 #define MT_WF_RFCR1_DROP_CFACK         BIT(8)
 
-#define MT_WF_RMAC_MIB_TIME0(_band)    MT_WF_RMAC(_band, 0x03c4)
+#define MT_WF_RMAC_MIB_AIRTIME0(_band) MT_WF_RMAC(_band, 0x0380)
 #define MT_WF_RMAC_MIB_RXTIME_CLR      BIT(31)
 #define MT_WF_RMAC_MIB_RXTIME_EN       BIT(30)
 
-#define MT_WF_RMAC_MIB_AIRTIME14(_band)        MT_WF_RMAC(_band, 0x03b8)
-#define MT_MIB_OBSSTIME_MASK           GENMASK(23, 0)
-#define MT_WF_RMAC_MIB_AIRTIME0(_band) MT_WF_RMAC(_band, 0x0380)
-
 /* WFDMA0 */
 #define MT_WFDMA0_BASE                 0xd4000
 #define MT_WFDMA0(ofs)                 (MT_WFDMA0_BASE + (ofs))
index f9d81e3..b220b33 100644 (file)
@@ -464,10 +464,17 @@ mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
 static void
 mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
 {
-       if (en)
+       mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
+
+       if (en) {
+               struct mt7915_dev *dev = phy->dev;
+
                mt7915_tm_update_channel(phy);
 
-       mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
+               /* read-clear */
+               mt76_rr(dev, MT_MIB_SDR3(phy != &dev->phy));
+               mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
+       }
 }
 
 static int
@@ -690,7 +697,11 @@ static int
 mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
 {
        struct mt7915_phy *phy = mphy->priv;
+       struct mt7915_dev *dev = phy->dev;
+       bool ext_phy = phy != &dev->phy;
+       enum mt76_rxq_id q;
        void *rx, *rssi;
+       u16 fcs_err;
        int i;
 
        rx = nla_nest_start(msg, MT76_TM_STATS_ATTR_LAST_RX);
@@ -735,6 +746,12 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
 
        nla_nest_end(msg, rx);
 
+       fcs_err = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
+                                MT_MIB_SDR3_FCS_ERR_MASK);
+       q = ext_phy ? MT_RXQ_EXT : MT_RXQ_MAIN;
+       mphy->test.rx_stats.packets[q] += fcs_err;
+       mphy->test.rx_stats.fcs_error[q] += fcs_err;
+
        return 0;
 }
 
index 8f8533e..397a6b5 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ISC
+/* SPDX-License-Identifier: ISC */
 /* Copyright (C) 2020 MediaTek Inc. */
 
 #ifndef __MT7915_TESTMODE_H
index e531666..0ebb599 100644 (file)
@@ -1,4 +1,4 @@
-#SPDX-License-Identifier: ISC
+# SPDX-License-Identifier: ISC
 
 obj-$(CONFIG_MT7921E) += mt7921e.o
 
index 6ee423d..77468bd 100644 (file)
@@ -184,7 +184,10 @@ mt7921_txpwr(struct seq_file *s, void *data)
        struct mt7921_txpwr txpwr;
        int ret;
 
+       mt7921_mutex_acquire(dev);
        ret = mt7921_get_txpwr_info(dev, &txpwr);
+       mt7921_mutex_release(dev);
+
        if (ret)
                return ret;
 
@@ -247,6 +250,9 @@ mt7921_pm_set(void *data, u64 val)
        ieee80211_iterate_active_interfaces(mphy->hw,
                                            IEEE80211_IFACE_ITER_RESUME_ALL,
                                            mt7921_pm_interface_iter, mphy->priv);
+
+       mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
+
        mt7921_mutex_release(dev);
 
        return 0;
@@ -265,6 +271,36 @@ mt7921_pm_get(void *data, u64 *val)
 DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7921_pm_get, mt7921_pm_set, "%lld\n");
 
 static int
+mt7921_deep_sleep_set(void *data, u64 val)
+{
+       struct mt7921_dev *dev = data;
+       struct mt76_connac_pm *pm = &dev->pm;
+       bool enable = !!val;
+
+       mt7921_mutex_acquire(dev);
+       if (pm->ds_enable != enable) {
+               mt76_connac_mcu_set_deep_sleep(&dev->mt76, enable);
+               pm->ds_enable = enable;
+       }
+       mt7921_mutex_release(dev);
+
+       return 0;
+}
+
+static int
+mt7921_deep_sleep_get(void *data, u64 *val)
+{
+       struct mt7921_dev *dev = data;
+
+       *val = dev->pm.ds_enable;
+
+       return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_ds, mt7921_deep_sleep_get,
+                        mt7921_deep_sleep_set, "%lld\n");
+
+static int
 mt7921_pm_stats(struct seq_file *s, void *data)
 {
        struct mt7921_dev *dev = dev_get_drvdata(s->private);
@@ -355,6 +391,7 @@ int mt7921_init_debugfs(struct mt7921_dev *dev)
        debugfs_create_file("chip_reset", 0600, dir, dev, &fops_reset);
        debugfs_create_devm_seqfile(dev->mt76.dev, "runtime_pm_stats", dir,
                                    mt7921_pm_stats);
+       debugfs_create_file("deep-sleep", 0600, dir, dev, &fops_ds);
 
        return 0;
 }
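
The new "deep-sleep" knob follows the stock simple-attribute recipe: a get/set pair over u64, wrapped with DEFINE_DEBUGFS_ATTRIBUTE() and registered through debugfs_create_file(). A stand-alone sketch of the same recipe guarding a boolean, all names hypothetical:

    #include <linux/debugfs.h>
    #include <linux/mutex.h>

    struct demo_pm {
            struct mutex lock;      /* init with mutex_init() */
            bool ds_enable;
    };

    static int demo_ds_set(void *data, u64 val)
    {
            struct demo_pm *pm = data;

            mutex_lock(&pm->lock);
            pm->ds_enable = !!val;  /* a driver would also notify fw here */
            mutex_unlock(&pm->lock);

            return 0;
    }

    static int demo_ds_get(void *data, u64 *val)
    {
            struct demo_pm *pm = data;

            *val = pm->ds_enable;

            return 0;
    }

    DEFINE_DEBUGFS_ATTRIBUTE(demo_ds_fops, demo_ds_get, demo_ds_set, "%lld\n");

    /* registration, e.g. from an init path:
     *   debugfs_create_file("deep-sleep", 0600, dir, pm, &demo_ds_fops);
     */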
index 71e664e..7d7d43a 100644 (file)
@@ -74,7 +74,7 @@ static int mt7921_poll_tx(struct napi_struct *napi, int budget)
        mt7921_tx_cleanup(dev);
        if (napi_complete(napi))
                mt7921_irq_enable(dev, MT_INT_TX_DONE_ALL);
-       mt76_connac_pm_unref(&dev->pm);
+       mt76_connac_pm_unref(&dev->mphy, &dev->pm);
 
        return 0;
 }
@@ -92,7 +92,7 @@ static int mt7921_poll_rx(struct napi_struct *napi, int budget)
                return 0;
        }
        done = mt76_dma_rx_poll(napi, budget);
-       mt76_connac_pm_unref(&dev->pm);
+       mt76_connac_pm_unref(&dev->mphy, &dev->pm);
 
        return done;
 }
@@ -313,9 +313,9 @@ static int mt7921_dma_reset(struct mt7921_dev *dev, bool force)
 
 int mt7921_wfsys_reset(struct mt7921_dev *dev)
 {
-       mt76_set(dev, 0x70002600, BIT(0));
-       msleep(200);
-       mt76_clear(dev, 0x70002600, BIT(0));
+       mt76_clear(dev, MT_WFSYS_SW_RST_B, WFSYS_SW_RST_B);
+       msleep(50);
+       mt76_set(dev, MT_WFSYS_SW_RST_B, WFSYS_SW_RST_B);
 
        if (!__mt76_poll_msec(&dev->mt76, MT_WFSYS_SW_RST_B,
                              WFSYS_SW_INIT_DONE, WFSYS_SW_INIT_DONE, 500))
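
The wfsys reset above swaps a magic address for the named MT_WFSYS_SW_RST_B bit: assert (the bit is active-low here, hence the clear), let the block settle, release, then poll for the init-done flag with a timeout. A generic sketch of that sequence over MMIO, with hypothetical offsets and bits (the driver uses its own __mt76_poll_msec() helper; plain readl_poll_timeout() stands in here):

    #include <linux/bits.h>
    #include <linux/delay.h>
    #include <linux/io.h>
    #include <linux/iopoll.h>

    #define DEMO_SW_RST_B   0x18    /* hypothetical reset register */
    #define DEMO_RST_BIT    BIT(0)  /* active-low reset */
    #define DEMO_INIT_DONE  BIT(4)

    static int demo_wfsys_reset(void __iomem *regs)
    {
            u32 val;

            val = readl(regs + DEMO_SW_RST_B);
            writel(val & ~DEMO_RST_BIT, regs + DEMO_SW_RST_B);  /* assert */
            msleep(50);                                         /* settle */
            writel(val | DEMO_RST_BIT, regs + DEMO_SW_RST_B);   /* release */

            /* poll every 10 ms for init-done, give up after 500 ms */
            return readl_poll_timeout(regs + DEMO_SW_RST_B, val,
                                      val & DEMO_INIT_DONE, 10000, 500000);
    }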
@@ -380,9 +380,7 @@ int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev)
 
 int mt7921_dma_init(struct mt7921_dev *dev)
 {
-       /* Increase buffer size to receive large VHT/HE MPDUs */
        struct mt76_bus_ops *bus_ops;
-       int rx_buf_size = MT_RX_BUF_SIZE * 2;
        int ret;
 
        dev->bus_ops = dev->mt76.bus;
@@ -402,6 +400,10 @@ int mt7921_dma_init(struct mt7921_dev *dev)
        if (ret)
                return ret;
 
+       ret = mt7921_wfsys_reset(dev);
+       if (ret)
+               return ret;
+
        /* init tx queue */
        ret = mt7921_init_tx_queues(&dev->phy, MT7921_TXQ_BAND0,
                                    MT7921_TX_RING_SIZE);
@@ -426,7 +428,7 @@ int mt7921_dma_init(struct mt7921_dev *dev)
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
                               MT7921_RXQ_MCU_WM,
                               MT7921_RX_MCU_RING_SIZE,
-                              rx_buf_size, MT_RX_EVENT_RING_BASE);
+                              MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
        if (ret)
                return ret;
 
@@ -434,14 +436,14 @@ int mt7921_dma_init(struct mt7921_dev *dev)
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
                               MT7921_RXQ_MCU_WM,
                               MT7921_RX_MCU_RING_SIZE,
-                              rx_buf_size, MT_WFDMA0(0x540));
+                              MT_RX_BUF_SIZE, MT_WFDMA0(0x540));
        if (ret)
                return ret;
 
        /* rx data */
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
                               MT7921_RXQ_BAND0, MT7921_RX_RING_SIZE,
-                              rx_buf_size, MT_RX_DATA_RING_BASE);
+                              MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE);
        if (ret)
                return ret;
 
index 1763ea0..a9ce10b 100644 (file)
@@ -7,34 +7,6 @@
 #include "mcu.h"
 #include "eeprom.h"
 
-#define CCK_RATE(_idx, _rate) {                                                \
-       .bitrate = _rate,                                               \
-       .flags = IEEE80211_RATE_SHORT_PREAMBLE,                         \
-       .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),                    \
-       .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)),        \
-}
-
-#define OFDM_RATE(_idx, _rate) {                                       \
-       .bitrate = _rate,                                               \
-       .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),                   \
-       .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),             \
-}
-
-static struct ieee80211_rate mt7921_rates[] = {
-       CCK_RATE(0, 10),
-       CCK_RATE(1, 20),
-       CCK_RATE(2, 55),
-       CCK_RATE(3, 110),
-       OFDM_RATE(11, 60),
-       OFDM_RATE(15, 90),
-       OFDM_RATE(10, 120),
-       OFDM_RATE(14, 180),
-       OFDM_RATE(9,  240),
-       OFDM_RATE(13, 360),
-       OFDM_RATE(8,  480),
-       OFDM_RATE(12, 540),
-};
-
 static const struct ieee80211_iface_limit if_limits[] = {
        {
                .max = MT7921_MAX_INTERFACES,
@@ -73,11 +45,13 @@ static void
 mt7921_init_wiphy(struct ieee80211_hw *hw)
 {
        struct mt7921_phy *phy = mt7921_hw_phy(hw);
+       struct mt7921_dev *dev = phy->dev;
        struct wiphy *wiphy = hw->wiphy;
 
        hw->queues = 4;
        hw->max_rx_aggregation_subframes = 64;
        hw->max_tx_aggregation_subframes = 128;
+       hw->netdev_features = NETIF_F_RXCSUM;
 
        hw->radiotap_timestamp.units_pos =
                IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US;
@@ -88,11 +62,13 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
        hw->vif_data_size = sizeof(struct mt7921_vif);
 
        wiphy->iface_combinations = if_comb;
+       wiphy->flags &= ~WIPHY_FLAG_IBSS_RSN;
+       wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
        wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
        wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN;
        wiphy->max_scan_ssids = 4;
        wiphy->max_sched_scan_plan_interval =
-               MT76_CONNAC_MAX_SCHED_SCAN_INTERVAL;
+               MT76_CONNAC_MAX_TIME_SCHED_SCAN_INTERVAL;
        wiphy->max_sched_scan_ie_len = IEEE80211_MAX_DATA_LEN;
        wiphy->max_sched_scan_ssids = MT76_CONNAC_MAX_SCHED_SCAN_SSID;
        wiphy->max_match_sets = MT76_CONNAC_MAX_SCAN_MATCH;
@@ -100,46 +76,33 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
        wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
        wiphy->reg_notifier = mt7921_regd_notifier;
 
-       wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+       wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+                          NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
        wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL);
 
        ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
        ieee80211_hw_set(hw, HAS_RATE_CONTROL);
        ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD);
+       ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
        ieee80211_hw_set(hw, WANT_MONITOR_VIF);
        ieee80211_hw_set(hw, SUPPORTS_PS);
        ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
 
+       if (dev->pm.enable)
+               ieee80211_hw_set(hw, CONNECTION_MONITOR);
+
        hw->max_tx_fragments = 4;
 }
 
 static void
 mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
 {
-       u32 mask, set;
-
        mt76_rmw_field(dev, MT_TMAC_CTCR0(band),
                       MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
        mt76_set(dev, MT_TMAC_CTCR0(band),
                 MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
                 MT_TMAC_CTCR0_INS_DDLMT_EN);
 
-       mask = MT_MDP_RCFR0_MCU_RX_MGMT |
-              MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR |
-              MT_MDP_RCFR0_MCU_RX_CTL_BAR;
-       set = FIELD_PREP(MT_MDP_RCFR0_MCU_RX_MGMT, MT_MDP_TO_HIF) |
-             FIELD_PREP(MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR, MT_MDP_TO_HIF) |
-             FIELD_PREP(MT_MDP_RCFR0_MCU_RX_CTL_BAR, MT_MDP_TO_HIF);
-       mt76_rmw(dev, MT_MDP_BNRCFR0(band), mask, set);
-
-       mask = MT_MDP_RCFR1_MCU_RX_BYPASS |
-              MT_MDP_RCFR1_RX_DROPPED_UCAST |
-              MT_MDP_RCFR1_RX_DROPPED_MCAST;
-       set = FIELD_PREP(MT_MDP_RCFR1_MCU_RX_BYPASS, MT_MDP_TO_HIF) |
-             FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_UCAST, MT_MDP_TO_HIF) |
-             FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_MCAST, MT_MDP_TO_HIF);
-       mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set);
-
        mt76_set(dev, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
        mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
 
@@ -148,14 +111,15 @@ mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
        mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
 }
 
-void mt7921_mac_init(struct mt7921_dev *dev)
+int mt7921_mac_init(struct mt7921_dev *dev)
 {
        int i;
 
        mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 1536);
-       /* disable hardware de-agg */
-       mt76_clear(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
-       mt76_clear(dev, MT_MDP_DCR0, MT_MDP_DCR0_RX_HDR_TRANS_EN);
+       /* enable hardware de-agg */
+       mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
+       /* enable hardware rx header translation */
+       mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_RX_HDR_TRANS_EN);
 
        for (i = 0; i < MT7921_WTBL_SIZE; i++)
                mt7921_mac_wtbl_update(dev, i,
@@ -163,7 +127,7 @@ void mt7921_mac_init(struct mt7921_dev *dev)
        for (i = 0; i < 2; i++)
                mt7921_mac_init_band(dev, i);
 
-       mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, 0);
+       return mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, 0);
 }
 
 static int mt7921_init_hardware(struct mt7921_dev *dev)
@@ -203,9 +167,7 @@ static int mt7921_init_hardware(struct mt7921_dev *dev)
        dev->mt76.global_wcid.tx_info |= MT_WCID_TX_INFO_SET;
        rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
 
-       mt7921_mac_init(dev);
-
-       return 0;
+       return mt7921_mac_init(dev);
 }
 
 int mt7921_register_device(struct mt7921_dev *dev)
@@ -224,7 +186,6 @@ int mt7921_register_device(struct mt7921_dev *dev)
        mutex_init(&dev->pm.mutex);
        init_waitqueue_head(&dev->pm.wait);
        spin_lock_init(&dev->pm.txq_lock);
-       set_bit(MT76_STATE_PM, &dev->mphy.state);
        INIT_LIST_HEAD(&dev->phy.stats_list);
        INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7921_mac_work);
        INIT_DELAYED_WORK(&dev->phy.scan_work, mt7921_scan_work);
@@ -239,6 +200,8 @@ int mt7921_register_device(struct mt7921_dev *dev)
        dev->pm.idle_timeout = MT7921_PM_TIMEOUT;
        dev->pm.stats.last_wake_event = jiffies;
        dev->pm.stats.last_doze_event = jiffies;
+       dev->pm.enable = true;
+       dev->pm.ds_enable = true;
 
        ret = mt7921_init_hardware(dev);
        if (ret)
@@ -253,19 +216,33 @@ int mt7921_register_device(struct mt7921_dev *dev)
                        IEEE80211_HT_CAP_MAX_AMSDU;
        dev->mphy.sband_5g.sband.vht_cap.cap |=
                        IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
-                       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+                       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
+                       IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+                       IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+                       (3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT);
+
        dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask;
        dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask;
 
        mt76_set_stream_caps(&dev->mphy, true);
        mt7921_set_stream_he_caps(&dev->phy);
 
-       ret = mt76_register_device(&dev->mt76, true, mt7921_rates,
-                                  ARRAY_SIZE(mt7921_rates));
+       ret = mt76_register_device(&dev->mt76, true, mt76_rates,
+                                  ARRAY_SIZE(mt76_rates));
+       if (ret)
+               return ret;
+
+       ret = mt7921_init_debugfs(dev);
        if (ret)
                return ret;
 
-       return mt7921_init_debugfs(dev);
+       ret = mt76_connac_mcu_set_deep_sleep(&dev->mt76, dev->pm.ds_enable);
+       if (ret)
+               return ret;
+
+       dev->hw_init_done = true;
+
+       return 0;
 }
 
 void mt7921_unregister_device(struct mt7921_dev *dev)
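
Worth noting in the registration hunk above: hw_init_done is raised only after every step has succeeded, and the mt7921_reset() hunk later in this diff refuses to queue recovery work until it is set, so a firmware hiccup during probe can no longer schedule a reset against a half-initialized device. The guard in miniature, assuming plain bool flags as in the driver:

    #include <linux/workqueue.h>

    struct demo_dev {
            bool hw_init_done;      /* set at the very end of probe */
            bool hw_full_reset;     /* set while recovery is running */
            struct workqueue_struct *wq;
            struct work_struct reset_work;
    };

    /* async error path, e.g. a firmware exception notification */
    static void demo_reset(struct demo_dev *dev)
    {
            if (!dev->hw_init_done)         /* probe not finished yet */
                    return;
            if (dev->hw_full_reset)         /* recovery already in flight */
                    return;

            queue_work(dev->wq, &dev->reset_work);
    }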
index decf2d5..7fe2e3a 100644 (file)
@@ -308,21 +308,24 @@ mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb)
 
 int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
 {
+       u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
        struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+       bool hdr_trans, unicast, insert_ccmp_hdr = false;
+       u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info;
+       __le32 *rxv = NULL, *rxd = (__le32 *)skb->data;
        struct mt76_phy *mphy = &dev->mt76.phy;
        struct mt7921_phy *phy = &dev->phy;
        struct ieee80211_supported_band *sband;
        struct ieee80211_hdr *hdr;
-       __le32 *rxd = (__le32 *)skb->data;
-       __le32 *rxv = NULL;
-       u32 mode = 0;
+       u32 rxd0 = le32_to_cpu(rxd[0]);
        u32 rxd1 = le32_to_cpu(rxd[1]);
        u32 rxd2 = le32_to_cpu(rxd[2]);
        u32 rxd3 = le32_to_cpu(rxd[3]);
-       bool unicast, insert_ccmp_hdr = false;
-       u8 remove_pad;
+       u32 rxd4 = le32_to_cpu(rxd[4]);
+       u16 seq_ctrl = 0;
+       __le16 fc = 0;
+       u32 mode = 0;
        int i, idx;
-       u8 chfreq;
 
        memset(status, 0, sizeof(*status));
 
@@ -332,9 +335,13 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
        if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
                return -EINVAL;
 
+       if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
+               return -EINVAL;
+
        chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3);
        unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
        idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
+       hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
        status->wcid = mt7921_rx_get_wcid(dev, idx, unicast);
 
        if (status->wcid) {
@@ -357,6 +364,9 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
        if (!sband->channels)
                return -EINVAL;
 
+       if ((rxd0 & csum_mask) == csum_mask)
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+
        if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
                status->flag |= RX_FLAG_FAILED_FCS_CRC;
 
@@ -377,6 +387,13 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
 
        rxd += 6;
        if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
+               u32 v0 = le32_to_cpu(rxd[0]);
+               u32 v2 = le32_to_cpu(rxd[2]);
+
+               fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
+               seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);
+               qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2);
+
                rxd += 4;
                if ((u8 *)rxd - skb->data >= skb->len)
                        return -EINVAL;
@@ -386,14 +403,27 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
                u8 *data = (u8 *)rxd;
 
                if (status->flag & RX_FLAG_DECRYPTED) {
-                       status->iv[0] = data[5];
-                       status->iv[1] = data[4];
-                       status->iv[2] = data[3];
-                       status->iv[3] = data[2];
-                       status->iv[4] = data[1];
-                       status->iv[5] = data[0];
-
-                       insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+                       switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
+                       case MT_CIPHER_AES_CCMP:
+                       case MT_CIPHER_CCMP_CCX:
+                       case MT_CIPHER_CCMP_256:
+                               insert_ccmp_hdr =
+                                       FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+                               fallthrough;
+                       case MT_CIPHER_TKIP:
+                       case MT_CIPHER_TKIP_NO_MIC:
+                       case MT_CIPHER_GCMP:
+                       case MT_CIPHER_GCMP_256:
+                               status->iv[0] = data[5];
+                               status->iv[1] = data[4];
+                               status->iv[2] = data[3];
+                               status->iv[3] = data[2];
+                               status->iv[4] = data[1];
+                               status->iv[5] = data[0];
+                               break;
+                       default:
+                               break;
+                       }
                }
                rxd += 4;
                if ((u8 *)rxd - skb->data >= skb->len)
@@ -444,16 +474,19 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
                status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
                status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
                status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
-               status->signal = status->chain_signal[0];
-
-               for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
-                       if (!(status->chains & BIT(i)))
+               status->signal = -128;
+               for (i = 0; i < hweight8(mphy->antenna_mask); i++) {
+                       if (!(status->chains & BIT(i)) ||
+                           status->chain_signal[i] >= 0)
                                continue;
 
                        status->signal = max(status->signal,
                                             status->chain_signal[i]);
                }
 
+               if (status->signal == -128)
+                       status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
                stbc = FIELD_GET(MT_PRXV_STBC, v0);
                gi = FIELD_GET(MT_PRXV_SGI, v0);
                cck = false;
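
The RSSI hunk above stops trusting chain 0 unconditionally: it starts from a -128 dBm sentinel, keeps the maximum over chains that are both present in the chain mask and report a plausible (negative) dBm value, and sets RX_FLAG_NO_SIGNAL_VAL when nothing qualified. The selection logic as a stand-alone helper:

    #include <stdint.h>

    #define DEMO_NO_SIGNAL  (-128)  /* sentinel below any real dBm value */

    /* returns the strongest per-chain RSSI, or DEMO_NO_SIGNAL when no
     * enabled chain reported a plausible (negative dBm) reading */
    static int8_t demo_best_signal(const int8_t *chain_signal,
                                   uint8_t chain_mask, int n_chains)
    {
            int8_t best = DEMO_NO_SIGNAL;
            int i;

            for (i = 0; i < n_chains; i++) {
                    if (!(chain_mask & (1u << i)) || chain_signal[i] >= 0)
                            continue;
                    if (chain_signal[i] > best)
                            best = chain_signal[i];
            }

            return best;
    }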
@@ -540,10 +573,35 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
 
        skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
 
-       if (insert_ccmp_hdr) {
-               u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
+       amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
+       status->amsdu = !!amsdu_info;
+       if (status->amsdu) {
+               status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
+               status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
+               if (!hdr_trans) {
+                       memmove(skb->data + 2, skb->data,
+                               ieee80211_get_hdrlen_from_skb(skb));
+                       skb_pull(skb, 2);
+               }
+       }
+
+       if (!hdr_trans) {
+               if (insert_ccmp_hdr) {
+                       u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
+
+                       mt76_insert_ccmp_hdr(skb, key_id);
+               }
 
-               mt76_insert_ccmp_hdr(skb, key_id);
+               hdr = mt76_skb_get_hdr(skb);
+               fc = hdr->frame_control;
+               if (ieee80211_is_data_qos(fc)) {
+                       seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
+                       qos_ctl = *ieee80211_get_qos_ctl(hdr);
+               }
+       } else {
+               status->flag &= ~(RX_FLAG_RADIOTAP_HE |
+                                 RX_FLAG_RADIOTAP_HE_MU);
+               status->flag |= RX_FLAG_8023;
        }
 
        mt7921_mac_assoc_rssi(dev, skb);
@@ -551,14 +609,12 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
        if (rxv && status->flag & RX_FLAG_RADIOTAP_HE)
                mt7921_mac_decode_he_radiotap(skb, status, rxv, mode);
 
-       hdr = mt76_skb_get_hdr(skb);
-       if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
+       if (!status->wcid || !ieee80211_is_data_qos(fc))
                return 0;
 
-       status->aggr = unicast &&
-                      !ieee80211_is_qos_nullfunc(hdr->frame_control);
-       status->qos_ctl = *ieee80211_get_qos_ctl(hdr);
-       status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+       status->aggr = unicast && !ieee80211_is_qos_nullfunc(fc);
+       status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
+       status->qos_ctl = qos_ctl;
 
        return 0;
 }
@@ -676,6 +732,23 @@ mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi,
        txwi[7] |= cpu_to_le32(val);
 }
 
+static void mt7921_update_txs(struct mt76_wcid *wcid, __le32 *txwi)
+{
+       struct mt7921_sta *msta = container_of(wcid, struct mt7921_sta, wcid);
+       u32 pid, frame_type = FIELD_GET(MT_TXD2_FRAME_TYPE, txwi[2]);
+
+       if (!(frame_type & (IEEE80211_FTYPE_DATA >> 2)))
+               return;
+
+       if (time_is_after_eq_jiffies(msta->next_txs_ts))
+               return;
+
+       msta->next_txs_ts = jiffies + msecs_to_jiffies(250);
+       pid = mt76_get_next_pkt_id(wcid);
+       txwi[5] |= cpu_to_le32(MT_TXD5_TX_STATUS_MCU |
+                              FIELD_PREP(MT_TXD5_PID, pid));
+}
+
 void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
                           struct sk_buff *skb, struct mt76_wcid *wcid,
                           struct ieee80211_key_conf *key, bool beacon)
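
mt7921_update_txs() above asks firmware for TX status at most once per 250 ms per station: time_is_after_eq_jiffies(ts) stays true while ts lies in the future, so the early return suppresses further requests until the window has expired. The throttle on its own, with the same field naming:

    #include <linux/jiffies.h>

    struct demo_sta {
            unsigned long next_txs_ts;
    };

    /* true at most once per 250 ms window */
    static bool demo_txs_allowed(struct demo_sta *sta)
    {
            if (time_is_after_eq_jiffies(sta->next_txs_ts))
                    return false;   /* previous window still open */

            sta->next_txs_ts = jiffies + msecs_to_jiffies(250);

            return true;
    }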
@@ -752,6 +825,8 @@ void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
                txwi[6] |= cpu_to_le32(val);
                txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
        }
+
+       mt7921_update_txs(wcid, txwi);
 }
 
 static void
@@ -1154,18 +1229,18 @@ mt7921_phy_update_channel(struct mt76_phy *mphy, int idx)
        state->noise = -(phy->noise >> 4);
 }
 
-void mt7921_update_channel(struct mt76_dev *mdev)
+void mt7921_update_channel(struct mt76_phy *mphy)
 {
-       struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+       struct mt7921_dev *dev = container_of(mphy->dev, struct mt7921_dev, mt76);
 
-       if (mt76_connac_pm_wake(&dev->mphy, &dev->pm))
+       if (mt76_connac_pm_wake(mphy, &dev->pm))
                return;
 
-       mt7921_phy_update_channel(&mdev->phy, 0);
+       mt7921_phy_update_channel(mphy, 0);
        /* reset obss airtime */
        mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
 
-       mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
+       mt76_connac_power_save_sched(mphy, &dev->pm);
 }
 
 void mt7921_tx_token_put(struct mt7921_dev *dev)
@@ -1196,7 +1271,8 @@ mt7921_vif_connect_iter(void *priv, u8 *mac,
        struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
        struct mt7921_dev *dev = mvif->phy->dev;
 
-       ieee80211_disconnect(vif, true);
+       if (vif->type == NL80211_IFTYPE_STATION)
+               ieee80211_disconnect(vif, true);
 
        mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true);
        mt7921_mcu_set_tx(dev, vif);
@@ -1212,6 +1288,7 @@ mt7921_mac_reset(struct mt7921_dev *dev)
        mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
        mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
 
+       set_bit(MT76_RESET, &dev->mphy.state);
        set_bit(MT76_MCU_RESET, &dev->mphy.state);
        wake_up(&dev->mt76.mcu.wait);
        skb_queue_purge(&dev->mt76.mcu.res_q);
@@ -1227,56 +1304,64 @@ mt7921_mac_reset(struct mt7921_dev *dev)
        mt7921_tx_token_put(dev);
        idr_init(&dev->mt76.token);
 
-       err = mt7921_wpdma_reset(dev, true);
-       if (err)
-               return err;
+       mt7921_wpdma_reset(dev, true);
 
        mt76_for_each_q_rx(&dev->mt76, i) {
                napi_enable(&dev->mt76.napi[i]);
                napi_schedule(&dev->mt76.napi[i]);
        }
 
-       napi_enable(&dev->mt76.tx_napi);
-       napi_schedule(&dev->mt76.tx_napi);
-       mt76_worker_enable(&dev->mt76.tx_worker);
-
        clear_bit(MT76_MCU_RESET, &dev->mphy.state);
-       clear_bit(MT76_STATE_PM, &dev->mphy.state);
 
-       mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
+       mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA,
+               MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+               MT_INT_MCU_CMD);
        mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
 
        err = mt7921_run_firmware(dev);
        if (err)
-               return err;
+               goto out;
 
        err = mt7921_mcu_set_eeprom(dev);
        if (err)
-               return err;
+               goto out;
 
-       mt7921_mac_init(dev);
-       return __mt7921_start(&dev->phy);
+       err = mt7921_mac_init(dev);
+       if (err)
+               goto out;
+
+       err = __mt7921_start(&dev->phy);
+out:
+       clear_bit(MT76_RESET, &dev->mphy.state);
+
+       napi_enable(&dev->mt76.tx_napi);
+       napi_schedule(&dev->mt76.tx_napi);
+       mt76_worker_enable(&dev->mt76.tx_worker);
+
+       return err;
 }
 
 /* system error recovery */
 void mt7921_mac_reset_work(struct work_struct *work)
 {
-       struct ieee80211_hw *hw;
-       struct mt7921_dev *dev;
+       struct mt7921_dev *dev = container_of(work, struct mt7921_dev,
+                                             reset_work);
+       struct ieee80211_hw *hw = mt76_hw(dev);
+       struct mt76_connac_pm *pm = &dev->pm;
        int i;
 
-       dev = container_of(work, struct mt7921_dev, reset_work);
-       hw = mt76_hw(dev);
-
        dev_err(dev->mt76.dev, "chip reset\n");
+       dev->hw_full_reset = true;
        ieee80211_stop_queues(hw);
 
        cancel_delayed_work_sync(&dev->mphy.mac_work);
-       cancel_delayed_work_sync(&dev->pm.ps_work);
-       cancel_work_sync(&dev->pm.wake_work);
+       cancel_delayed_work_sync(&pm->ps_work);
+       cancel_work_sync(&pm->wake_work);
 
        mutex_lock(&dev->mt76.mutex);
        for (i = 0; i < 10; i++) {
+               __mt7921_mcu_drv_pmctrl(dev);
+
                if (!mt7921_mac_reset(dev))
                        break;
        }
@@ -1293,16 +1378,24 @@ void mt7921_mac_reset_work(struct work_struct *work)
                ieee80211_scan_completed(dev->mphy.hw, &info);
        }
 
+       dev->hw_full_reset = false;
        ieee80211_wake_queues(hw);
        ieee80211_iterate_active_interfaces(hw,
                                            IEEE80211_IFACE_ITER_RESUME_ALL,
                                            mt7921_vif_connect_iter, NULL);
+       mt76_connac_power_save_sched(&dev->mt76.phy, pm);
 }
 
 void mt7921_reset(struct mt76_dev *mdev)
 {
        struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
 
+       if (!dev->hw_init_done)
+               return;
+
+       if (dev->hw_full_reset)
+               return;
+
        queue_work(dev->mt76.wq, &dev->reset_work);
 }
 
@@ -1337,30 +1430,6 @@ mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
        }
 }
 
-static void
-mt7921_mac_sta_stats_work(struct mt7921_phy *phy)
-{
-       struct mt7921_dev *dev = phy->dev;
-       struct mt7921_sta *msta;
-       LIST_HEAD(list);
-
-       spin_lock_bh(&dev->sta_poll_lock);
-       list_splice_init(&phy->stats_list, &list);
-
-       while (!list_empty(&list)) {
-               msta = list_first_entry(&list, struct mt7921_sta, stats_list);
-               list_del_init(&msta->stats_list);
-               spin_unlock_bh(&dev->sta_poll_lock);
-
-               /* query wtbl info to report tx rate for further devices */
-               mt7921_get_wtbl_info(dev, msta->wcid.idx);
-
-               spin_lock_bh(&dev->sta_poll_lock);
-       }
-
-       spin_unlock_bh(&dev->sta_poll_lock);
-}
-
 void mt7921_mac_work(struct work_struct *work)
 {
        struct mt7921_phy *phy;
@@ -1372,16 +1441,12 @@ void mt7921_mac_work(struct work_struct *work)
 
        mt7921_mutex_acquire(phy->dev);
 
-       mt76_update_survey(mphy->dev);
+       mt76_update_survey(mphy);
        if (++mphy->mac_work_count == 2) {
                mphy->mac_work_count = 0;
 
                mt7921_mac_update_mib_stats(phy);
        }
-       if (++phy->sta_work_count == 4) {
-               phy->sta_work_count = 0;
-               mt7921_mac_sta_stats_work(phy);
-       }
 
        mt7921_mutex_release(phy->dev);
        ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work,
@@ -1417,13 +1482,15 @@ void mt7921_pm_power_save_work(struct work_struct *work)
 {
        struct mt7921_dev *dev;
        unsigned long delta;
+       struct mt76_phy *mphy;
 
        dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
                                                pm.ps_work.work);
+       mphy = dev->phy.mt76;
 
        delta = dev->pm.idle_timeout;
-       if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) ||
-           test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
+       if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
+           test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
                goto out;
 
        if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
@@ -1431,8 +1498,10 @@ void mt7921_pm_power_save_work(struct work_struct *work)
                goto out;
        }
 
-       if (!mt7921_mcu_fw_pmctrl(dev))
+       if (!mt7921_mcu_fw_pmctrl(dev)) {
+               cancel_delayed_work_sync(&mphy->mac_work);
                return;
+       }
 out:
        queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
 }
@@ -1494,7 +1563,7 @@ void mt7921_coredump_work(struct work_struct *work)
                        break;
 
                skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
-               if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
+               if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
                        dev_kfree_skb(skb);
                        continue;
                }
@@ -1504,7 +1573,10 @@ void mt7921_coredump_work(struct work_struct *work)
 
                dev_kfree_skb(skb);
        }
-       dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
-                     GFP_KERNEL);
+
+       if (dump)
+               dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
+                             GFP_KERNEL);
+
        mt7921_reset(&dev->mt76);
 }
index 109c884..3af67fa 100644 (file)
@@ -88,6 +88,9 @@ enum rx_pkt_type {
 
 /* RXD DW4 */
 #define MT_RXD4_NORMAL_PAYLOAD_FORMAT  GENMASK(1, 0)
+#define MT_RXD4_FIRST_AMSDU_FRAME      GENMASK(1, 0)
+#define MT_RXD4_MID_AMSDU_FRAME                BIT(1)
+#define MT_RXD4_LAST_AMSDU_FRAME       BIT(0)
 #define MT_RXD4_NORMAL_PATTERN_DROP    BIT(9)
 #define MT_RXD4_NORMAL_CLS             BIT(10)
 #define MT_RXD4_NORMAL_OFLD            GENMASK(12, 11)
@@ -97,6 +100,17 @@ enum rx_pkt_type {
 #define MT_RXD3_NORMAL_PF_MODE         BIT(29)
 #define MT_RXD3_NORMAL_PF_STS          GENMASK(31, 30)
 
+/* RXD GROUP4 */
+#define MT_RXD6_FRAME_CONTROL          GENMASK(15, 0)
+#define MT_RXD6_TA_LO                  GENMASK(31, 16)
+
+#define MT_RXD7_TA_HI                  GENMASK(31, 0)
+
+#define MT_RXD8_SEQ_CTRL               GENMASK(15, 0)
+#define MT_RXD8_QOS_CTL                        GENMASK(31, 16)
+
+#define MT_RXD9_HT_CONTROL             GENMASK(31, 0)
+
 /* P-RXV DW0 */
 #define MT_PRXV_TX_RATE                        GENMASK(6, 0)
 #define MT_PRXV_TX_DCM                 BIT(4)
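
The new RXD GROUP4 defines carve the 16-bit frame-control, sequence-control and QoS-control fields out of 32-bit receive-descriptor words, which is what lets mt7921_mac_fill_rx() recover them when hardware header translation has stripped the 802.11 header. A minimal GENMASK()/FIELD_GET() extraction over one such word, with hypothetical names mirroring the defines above:

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    #define DEMO_RXD8_SEQ_CTRL      GENMASK(15, 0)
    #define DEMO_RXD8_QOS_CTL       GENMASK(31, 16)

    static void demo_parse_rxd8(u32 rxd8, u16 *seq_ctrl, u8 *tid)
    {
            *seq_ctrl = FIELD_GET(DEMO_RXD8_SEQ_CTRL, rxd8);
            /* the low 4 bits of QoS control carry the TID */
            *tid = FIELD_GET(DEMO_RXD8_QOS_CTL, rxd8) & 0xf;
    }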
index 97a0ef3..7fd2104 100644 (file)
@@ -79,13 +79,14 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
                he_cap_elem->phy_cap_info[1] =
                        IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
                he_cap_elem->phy_cap_info[2] =
+                       IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
                        IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
-                       IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
+                       IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+                       IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+                       IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
 
                switch (i) {
                case NL80211_IFTYPE_STATION:
-                       he_cap_elem->mac_cap_info[0] |=
-                               IEEE80211_HE_MAC_CAP0_TWT_REQ;
                        he_cap_elem->mac_cap_info[1] |=
                                IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
 
@@ -102,7 +103,15 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
                        he_cap_elem->phy_cap_info[3] |=
                                IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
                                IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
+                       he_cap_elem->phy_cap_info[4] |=
+                               IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+                               IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
+                       he_cap_elem->phy_cap_info[5] |=
+                               IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
+                               IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
                        he_cap_elem->phy_cap_info[6] |=
+                               IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
+                               IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
                                IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
                                IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
                                IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
@@ -223,54 +232,6 @@ static void mt7921_stop(struct ieee80211_hw *hw)
        mt7921_mutex_release(dev);
 }
 
-static inline int get_free_idx(u32 mask, u8 start, u8 end)
-{
-       return ffs(~mask & GENMASK(end, start));
-}
-
-static int get_omac_idx(enum nl80211_iftype type, u64 mask)
-{
-       int i;
-
-       switch (type) {
-       case NL80211_IFTYPE_STATION:
-               /* prefer hw bssid slot 1-3 */
-               i = get_free_idx(mask, HW_BSSID_1, HW_BSSID_3);
-               if (i)
-                       return i - 1;
-
-               /* next, try to find a free repeater entry for the sta */
-               i = get_free_idx(mask >> REPEATER_BSSID_START, 0,
-                                REPEATER_BSSID_MAX - REPEATER_BSSID_START);
-               if (i)
-                       return i + 32 - 1;
-
-               i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
-               if (i)
-                       return i - 1;
-
-               if (~mask & BIT(HW_BSSID_0))
-                       return HW_BSSID_0;
-
-               break;
-       case NL80211_IFTYPE_MONITOR:
-               /* ap uses hw bssid 0 and ext bssid */
-               if (~mask & BIT(HW_BSSID_0))
-                       return HW_BSSID_0;
-
-               i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
-               if (i)
-                       return i - 1;
-
-               break;
-       default:
-               WARN_ON(1);
-               break;
-       }
-
-       return -1;
-}
-
 static int mt7921_add_interface(struct ieee80211_hw *hw,
                                struct ieee80211_vif *vif)
 {
@@ -292,12 +253,7 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
                goto out;
        }
 
-       idx = get_omac_idx(vif->type, phy->omac_mask);
-       if (idx < 0) {
-               ret = -ENOSPC;
-               goto out;
-       }
-       mvif->mt76.omac_idx = idx;
+       mvif->mt76.omac_idx = mvif->mt76.idx;
        mvif->phy = phy;
        mvif->mt76.band_idx = 0;
        mvif->mt76.wmm_idx = mvif->mt76.idx % MT7921_MAX_WMM_SETS;
@@ -369,7 +325,7 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
        spin_unlock_bh(&dev->sta_poll_lock);
 }
 
-int mt7921_set_channel(struct mt7921_phy *phy)
+static int mt7921_set_channel(struct mt7921_phy *phy)
 {
        struct mt7921_dev *dev = phy->dev;
        int ret;
@@ -429,6 +385,10 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
                wcid_keyidx = &wcid->hw_key_idx2;
                break;
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104:
+               if (!mvif->wep_sta)
+                       return -EOPNOTSUPP;
        case WLAN_CIPHER_SUITE_TKIP:
        case WLAN_CIPHER_SUITE_CCMP:
        case WLAN_CIPHER_SUITE_CCMP_256:
@@ -436,8 +396,6 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
        case WLAN_CIPHER_SUITE_GCMP_256:
        case WLAN_CIPHER_SUITE_SMS4:
                break;
-       case WLAN_CIPHER_SUITE_WEP40:
-       case WLAN_CIPHER_SUITE_WEP104:
        default:
                return -EOPNOTSUPP;
        }
@@ -455,6 +413,12 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                            cmd == SET_KEY ? key : NULL);
 
        err = mt7921_mcu_add_key(dev, vif, msta, key, cmd);
+       if (err)
+               goto out;
+
+       if (key->cipher == WLAN_CIPHER_SUITE_WEP104 ||
+           key->cipher == WLAN_CIPHER_SUITE_WEP40)
+               err = mt7921_mcu_add_key(dev, vif, mvif->wep_sta, key, cmd);
 out:
        mt7921_mutex_release(dev);
 
@@ -477,6 +441,9 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
 
        mt7921_mutex_acquire(dev);
 
+       if (changed & IEEE80211_CONF_CHANGE_POWER)
+               mt76_connac_mcu_set_rate_txpower(phy->mt76);
+
        if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
                bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
 
@@ -622,7 +589,8 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
                mt7921_mcu_uni_bss_ps(dev, vif);
 
        if (changed & BSS_CHANGED_ASSOC) {
-               mt7921_mcu_sta_add(dev, NULL, vif, true);
+               mt7921_mcu_sta_update(dev, NULL, vif, true,
+                                     MT76_STA_INFO_STATE_ASSOC);
                mt7921_bss_bcnft_apply(dev, vif, info->assoc);
        }
 
@@ -661,14 +629,14 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
        if (ret)
                return ret;
 
-       if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
-               mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid,
-                                           true);
+       if (vif->type == NL80211_IFTYPE_STATION)
+               mvif->wep_sta = msta;
 
        mt7921_mac_wtbl_update(dev, idx,
                               MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
 
-       ret = mt7921_mcu_sta_add(dev, sta, vif, true);
+       ret = mt7921_mcu_sta_update(dev, sta, vif, true,
+                                   MT76_STA_INFO_STATE_NONE);
        if (ret)
                return ret;
 
@@ -677,6 +645,27 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
        return 0;
 }
 
+void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+                         struct ieee80211_sta *sta)
+{
+       struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+       struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
+       struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+
+       mt7921_mutex_acquire(dev);
+
+       if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+               mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid,
+                                           true);
+
+       mt7921_mac_wtbl_update(dev, msta->wcid.idx,
+                              MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+       mt7921_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC);
+
+       mt7921_mutex_release(dev);
+}
+
 void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
                           struct ieee80211_sta *sta)
 {
@@ -686,13 +675,14 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
        mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
        mt76_connac_pm_wake(&dev->mphy, &dev->pm);
 
-       mt7921_mcu_sta_add(dev, sta, vif, false);
+       mt7921_mcu_sta_update(dev, sta, vif, false, MT76_STA_INFO_STATE_NONE);
        mt7921_mac_wtbl_update(dev, msta->wcid.idx,
                               MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
 
        if (vif->type == NL80211_IFTYPE_STATION) {
                struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
 
+               mvif->wep_sta = NULL;
                ewma_rssi_init(&mvif->rssi);
                if (!sta->tdls)
                        mt76_connac_mcu_uni_add_bss(&dev->mphy, vif,
@@ -720,7 +710,7 @@ void mt7921_tx_worker(struct mt76_worker *w)
        }
 
        mt76_txq_schedule_all(&dev->mphy);
-       mt76_connac_pm_unref(&dev->pm);
+       mt76_connac_pm_unref(&dev->mphy, &dev->pm);
 }
 
 static void mt7921_tx(struct ieee80211_hw *hw,
@@ -750,7 +740,7 @@ static void mt7921_tx(struct ieee80211_hw *hw,
 
        if (mt76_connac_pm_ref(mphy, &dev->pm)) {
                mt76_tx(mphy, control->sta, wcid, skb);
-               mt76_connac_pm_unref(&dev->pm);
+               mt76_connac_pm_unref(mphy, &dev->pm);
                return;
        }
 
@@ -831,20 +821,21 @@ mt7921_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        return ret;
 }
 
-static int
-mt7921_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-              struct ieee80211_sta *sta)
+static int mt7921_sta_state(struct ieee80211_hw *hw,
+                           struct ieee80211_vif *vif,
+                           struct ieee80211_sta *sta,
+                           enum ieee80211_sta_state old_state,
+                           enum ieee80211_sta_state new_state)
 {
-       return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST,
-                             IEEE80211_STA_NONE);
-}
+       struct mt7921_dev *dev = mt7921_hw_dev(hw);
 
-static int
-mt7921_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                 struct ieee80211_sta *sta)
-{
-       return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE,
-                             IEEE80211_STA_NOTEXIST);
+       if (dev->pm.ds_enable) {
+               mt7921_mutex_acquire(dev);
+               mt76_connac_sta_state_dp(&dev->mt76, old_state, new_state);
+               mt7921_mutex_release(dev);
+       }
+
+       return mt76_sta_state(hw, vif, sta, old_state, new_state);
 }
 
 static int
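
Folding the sta_add/sta_remove pair into a full .sta_state callback hands the driver every mac80211 station-state transition (NOTEXIST/NONE/AUTH/ASSOC/AUTHORIZED), which the deep-sleep path above uses to tell firmware when data-path stations come and go. For reference, a minimal dispatcher that reduces the state machine back to add/remove events while leaving room for intermediate hooks (demo_sta_add()/demo_sta_remove() are placeholders):

    #include <net/mac80211.h>

    static int demo_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                            struct ieee80211_sta *sta);
    static void demo_sta_remove(struct ieee80211_hw *hw,
                                struct ieee80211_vif *vif,
                                struct ieee80211_sta *sta);

    static int demo_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                              struct ieee80211_sta *sta,
                              enum ieee80211_sta_state old_state,
                              enum ieee80211_sta_state new_state)
    {
            if (old_state == IEEE80211_STA_NOTEXIST &&
                new_state == IEEE80211_STA_NONE)
                    return demo_sta_add(hw, vif, sta);

            if (old_state == IEEE80211_STA_NONE &&
                new_state == IEEE80211_STA_NOTEXIST)
                    demo_sta_remove(hw, vif, sta);

            return 0;
    }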
@@ -1163,6 +1154,23 @@ static void mt7921_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                           HZ / 2);
 }
 
+static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw,
+                                        struct ieee80211_vif *vif,
+                                        struct ieee80211_sta *sta,
+                                        bool enabled)
+{
+       struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
+       struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
+       if (enabled)
+               set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+       else
+               clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+
+       mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, vif, &msta->wcid,
+                                            MCU_UNI_CMD_STA_REC_UPDATE);
+}
+
 const struct ieee80211_ops mt7921_ops = {
        .tx = mt7921_tx,
        .start = mt7921_start,
@@ -1173,10 +1181,10 @@ const struct ieee80211_ops mt7921_ops = {
        .conf_tx = mt7921_conf_tx,
        .configure_filter = mt7921_configure_filter,
        .bss_info_changed = mt7921_bss_info_changed,
-       .sta_add = mt7921_sta_add,
-       .sta_remove = mt7921_sta_remove,
+       .sta_state = mt7921_sta_state,
        .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
        .set_key = mt7921_set_key,
+       .sta_set_decap_offload = mt7921_sta_set_decap_offload,
        .ampdu_action = mt7921_ampdu_action,
        .set_rts_threshold = mt7921_set_rts_threshold,
        .wake_tx_queue = mt76_wake_tx_queue,
index 67dc4b4..c2c4dc1 100644 (file)
@@ -88,28 +88,28 @@ struct mt7921_fw_region {
 #define to_wcid_lo(id)                 FIELD_GET(GENMASK(7, 0), (u16)id)
 #define to_wcid_hi(id)                 FIELD_GET(GENMASK(9, 8), (u16)id)
 
-static enum mt7921_cipher_type
+static enum mcu_cipher_type
 mt7921_mcu_get_cipher(int cipher)
 {
        switch (cipher) {
        case WLAN_CIPHER_SUITE_WEP40:
-               return MT_CIPHER_WEP40;
+               return MCU_CIPHER_WEP40;
        case WLAN_CIPHER_SUITE_WEP104:
-               return MT_CIPHER_WEP104;
+               return MCU_CIPHER_WEP104;
        case WLAN_CIPHER_SUITE_TKIP:
-               return MT_CIPHER_TKIP;
+               return MCU_CIPHER_TKIP;
        case WLAN_CIPHER_SUITE_AES_CMAC:
-               return MT_CIPHER_BIP_CMAC_128;
+               return MCU_CIPHER_BIP_CMAC_128;
        case WLAN_CIPHER_SUITE_CCMP:
-               return MT_CIPHER_AES_CCMP;
+               return MCU_CIPHER_AES_CCMP;
        case WLAN_CIPHER_SUITE_CCMP_256:
-               return MT_CIPHER_CCMP_256;
+               return MCU_CIPHER_CCMP_256;
        case WLAN_CIPHER_SUITE_GCMP:
-               return MT_CIPHER_GCMP;
+               return MCU_CIPHER_GCMP;
        case WLAN_CIPHER_SUITE_GCMP_256:
-               return MT_CIPHER_GCMP_256;
+               return MCU_CIPHER_GCMP_256;
        case WLAN_CIPHER_SUITE_SMS4:
-               return MT_CIPHER_WAPI;
+               return MCU_CIPHER_WAPI;
        default:
                return MT_CIPHER_NONE;
        }
@@ -399,43 +399,6 @@ mt7921_mcu_tx_rate_parse(struct mt76_phy *mphy,
 }
 
 static void
-mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb,
-                         u16 wlan_idx)
-{
-       struct mt7921_mcu_wlan_info_event *wtbl_info;
-       struct mt76_phy *mphy = &dev->mphy;
-       struct mt7921_sta_stats *stats;
-       struct rate_info rate = {};
-       struct mt7921_sta *msta;
-       struct mt76_wcid *wcid;
-       u8 idx;
-
-       if (wlan_idx >= MT76_N_WCIDS)
-               return;
-
-       wtbl_info = (struct mt7921_mcu_wlan_info_event *)skb->data;
-       idx = wtbl_info->rate_info.rate_idx;
-       if (idx >= ARRAY_SIZE(wtbl_info->rate_info.rate))
-               return;
-
-       rcu_read_lock();
-
-       wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]);
-       if (!wcid)
-               goto out;
-
-       msta = container_of(wcid, struct mt7921_sta, wcid);
-       stats = &msta->stats;
-
-       /* current rate */
-       mt7921_mcu_tx_rate_parse(mphy, &wtbl_info->peer_cap, &rate,
-                                le16_to_cpu(wtbl_info->rate_info.rate[idx]));
-       stats->tx_rate = rate;
-out:
-       rcu_read_unlock();
-}
-
-static void
 mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb)
 {
        struct mt76_phy *mphy = &dev->mt76.phy;
@@ -450,22 +413,33 @@ mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb)
 }
 
 static void
-mt7921_mcu_beacon_loss_event(struct mt7921_dev *dev, struct sk_buff *skb)
+mt7921_mcu_connection_loss_iter(void *priv, u8 *mac,
+                               struct ieee80211_vif *vif)
+{
+       struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+       struct mt76_connac_beacon_loss_event *event = priv;
+
+       if (mvif->idx != event->bss_idx)
+               return;
+
+       if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
+               return;
+
+       ieee80211_connection_loss(vif);
+}
+
+static void
+mt7921_mcu_connection_loss_event(struct mt7921_dev *dev, struct sk_buff *skb)
 {
        struct mt76_connac_beacon_loss_event *event;
-       struct mt76_phy *mphy;
-       u8 band_idx = 0; /* DBDC support */
+       struct mt76_phy *mphy = &dev->mt76.phy;
 
        skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
        event = (struct mt76_connac_beacon_loss_event *)skb->data;
-       if (band_idx && dev->mt76.phy2)
-               mphy = dev->mt76.phy2;
-       else
-               mphy = &dev->mt76.phy;
 
        ieee80211_iterate_active_interfaces_atomic(mphy->hw,
                                        IEEE80211_IFACE_ITER_RESUME_ALL,
-                                       mt76_connac_mcu_beacon_loss_iter, event);
+                                       mt7921_mcu_connection_loss_iter, event);
 }
 
 static void
@@ -524,13 +498,56 @@ mt7921_mcu_low_power_event(struct mt7921_dev *dev, struct sk_buff *skb)
 }
 
 static void
+mt7921_mcu_tx_done_event(struct mt7921_dev *dev, struct sk_buff *skb)
+{
+       struct mt7921_mcu_tx_done_event *event;
+       struct mt7921_sta *msta;
+       struct mt7921_phy *mphy = &dev->phy;
+       struct mt7921_mcu_peer_cap peer;
+       struct ieee80211_sta *sta;
+       LIST_HEAD(list);
+
+       skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
+       event = (struct mt7921_mcu_tx_done_event *)skb->data;
+
+       spin_lock_bh(&dev->sta_poll_lock);
+       list_splice_init(&mphy->stats_list, &list);
+
+       while (!list_empty(&list)) {
+               msta = list_first_entry(&list, struct mt7921_sta, stats_list);
+               list_del_init(&msta->stats_list);
+
+               if (msta->wcid.idx != event->wlan_idx)
+                       continue;
+
+               spin_unlock_bh(&dev->sta_poll_lock);
+
+               sta = wcid_to_sta(&msta->wcid);
+
+               /* peer config based on IEEE SPEC */
+               memset(&peer, 0x0, sizeof(peer));
+               peer.bw = event->bw;
+               peer.g2 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
+               peer.g4 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
+               peer.g8 = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
+               peer.g16 = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
+               mt7921_mcu_tx_rate_parse(mphy->mt76, &peer,
+                                        &msta->stats.tx_rate, event->tx_rate);
+
+               spin_lock_bh(&dev->sta_poll_lock);
+               break;
+       }
+       spin_unlock_bh(&dev->sta_poll_lock);
+}
+
+static void
 mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
 {
        struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data;
 
        switch (rxd->eid) {
        case MCU_EVENT_BSS_BEACON_LOSS:
-               mt7921_mcu_beacon_loss_event(dev, skb);
+               mt7921_mcu_connection_loss_event(dev, skb);
                break;
        case MCU_EVENT_SCHED_SCAN_DONE:
        case MCU_EVENT_SCAN_DONE:
@@ -549,6 +566,9 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
        case MCU_EVENT_LP_INFO:
                mt7921_mcu_low_power_event(dev, skb);
                break;
+       case MCU_EVENT_TX_DONE:
+               mt7921_mcu_tx_done_event(dev, skb);
+               break;
        default:
                break;
        }
@@ -569,6 +589,7 @@ void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
            rxd->eid == MCU_EVENT_SCHED_SCAN_DONE ||
            rxd->eid == MCU_EVENT_BSS_ABSENCE ||
            rxd->eid == MCU_EVENT_SCAN_DONE ||
+           rxd->eid == MCU_EVENT_TX_DONE ||
            rxd->eid == MCU_EVENT_DBG_MSG ||
            rxd->eid == MCU_EVENT_COREDUMP ||
            rxd->eid == MCU_EVENT_LP_INFO ||
@@ -604,14 +625,14 @@ mt7921_mcu_sta_key_tlv(struct mt7921_sta *msta, struct sk_buff *skb,
                sec_key = &sec->key[0];
                sec_key->cipher_len = sizeof(*sec_key);
 
-               if (cipher == MT_CIPHER_BIP_CMAC_128) {
-                       sec_key->cipher_id = MT_CIPHER_AES_CCMP;
+               if (cipher == MCU_CIPHER_BIP_CMAC_128) {
+                       sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
                        sec_key->key_id = bip->keyidx;
                        sec_key->key_len = 16;
                        memcpy(sec_key->key, bip->key, 16);
 
                        sec_key = &sec->key[1];
-                       sec_key->cipher_id = MT_CIPHER_BIP_CMAC_128;
+                       sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
                        sec_key->cipher_len = sizeof(*sec_key);
                        sec_key->key_len = 16;
                        memcpy(sec_key->key, key->key, 16);
@@ -623,14 +644,14 @@ mt7921_mcu_sta_key_tlv(struct mt7921_sta *msta, struct sk_buff *skb,
                        sec_key->key_len = key->keylen;
                        memcpy(sec_key->key, key->key, key->keylen);
 
-                       if (cipher == MT_CIPHER_TKIP) {
+                       if (cipher == MCU_CIPHER_TKIP) {
                                /* Rx/Tx MIC keys are swapped */
                                memcpy(sec_key->key + 16, key->key + 24, 8);
                                memcpy(sec_key->key + 24, key->key + 16, 8);
                        }
 
                        /* store key_conf for BIP batch update */
-                       if (cipher == MT_CIPHER_AES_CCMP) {
+                       if (cipher == MCU_CIPHER_AES_CCMP) {
                                memcpy(bip->key, key->key, key->keylen);
                                bip->keyidx = key->keyidx;
                        }
@@ -934,8 +955,6 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
        dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support;
 #endif /* CONFIG_PM */
 
-       clear_bit(MT76_STATE_PM, &dev->mphy.state);
-
        dev_err(dev->mt76.dev, "Firmware init done\n");
 
        return 0;
@@ -969,7 +988,7 @@ int mt7921_run_firmware(struct mt7921_dev *dev)
        set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
        mt7921_mcu_fw_log_2_host(dev, 1);
 
-       return 0;
+       return mt76_connac_mcu_get_nic_capability(&dev->mphy);
 }
 
 int mt7921_mcu_init(struct mt7921_dev *dev)
@@ -1136,26 +1155,6 @@ int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset)
        return 0;
 }
 
-u32 mt7921_get_wtbl_info(struct mt7921_dev *dev, u32 wlan_idx)
-{
-       struct mt7921_mcu_wlan_info wtbl_info = {
-               .wlan_idx = cpu_to_le32(wlan_idx),
-       };
-       struct sk_buff *skb;
-       int ret;
-
-       ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CMD_GET_WTBL,
-                                       &wtbl_info, sizeof(wtbl_info), true,
-                                       &skb);
-       if (ret)
-               return ret;
-
-       mt7921_mcu_tx_rate_report(dev, skb, wlan_idx);
-       dev_kfree_skb(skb);
-
-       return 0;
-}
-
 int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif)
 {
        struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
@@ -1268,8 +1267,9 @@ int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
                                 sizeof(req), false);
 }
 
-int mt7921_mcu_sta_add(struct mt7921_dev *dev, struct ieee80211_sta *sta,
-                      struct ieee80211_vif *vif, bool enable)
+int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
+                         struct ieee80211_vif *vif, bool enable,
+                         enum mt76_sta_info_state state)
 {
        struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
        int rssi = -ewma_rssi_read(&mvif->rssi);
@@ -1278,27 +1278,25 @@ int mt7921_mcu_sta_add(struct mt7921_dev *dev, struct ieee80211_sta *sta,
                .vif = vif,
                .enable = enable,
                .cmd = MCU_UNI_CMD_STA_REC_UPDATE,
+               .state = state,
+               .offload_fw = true,
                .rcpi = to_rcpi(rssi),
        };
        struct mt7921_sta *msta;
 
        msta = sta ? (struct mt7921_sta *)sta->drv_priv : NULL;
        info.wcid = msta ? &msta->wcid : &mvif->sta.wcid;
+       info.newly = msta ? state != MT76_STA_INFO_STATE_ASSOC : true;
 
-       return mt76_connac_mcu_add_sta_cmd(&dev->mphy, &info);
+       return mt76_connac_mcu_sta_cmd(&dev->mphy, &info);
 }
 
-int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
+int __mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
 {
        struct mt76_phy *mphy = &dev->mt76.phy;
        struct mt76_connac_pm *pm = &dev->pm;
        int i, err = 0;
 
-       mutex_lock(&pm->mutex);
-
-       if (!test_bit(MT76_STATE_PM, &mphy->state))
-               goto out;
-
        for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
                mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_CLR_OWN);
                if (mt76_poll_msec(dev, MT_CONN_ON_LPCTL,
@@ -1319,6 +1317,22 @@ int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
        pm->stats.doze_time += pm->stats.last_wake_event -
                               pm->stats.last_doze_event;
 out:
+       return err;
+}
+
+int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
+{
+       struct mt76_phy *mphy = &dev->mt76.phy;
+       struct mt76_connac_pm *pm = &dev->pm;
+       int err = 0;
+
+       mutex_lock(&pm->mutex);
+
+       if (!test_bit(MT76_STATE_PM, &mphy->state))
+               goto out;
+
+       err = __mt7921_mcu_drv_pmctrl(dev);
+out:
        mutex_unlock(&pm->mutex);
 
        if (err)
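
The pmctrl refactor above follows the usual kernel double-underscore convention: __mt7921_mcu_drv_pmctrl does the work and assumes the caller already holds pm->mutex, while mt7921_mcu_drv_pmctrl remains the locked entry point for ordinary callers. A standalone sketch of the idiom, with a hypothetical device-wake resource in place of the real power-management state:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int powered;

/* __device_wake: caller must hold 'lock' (or otherwise exclude racers). */
static int __device_wake(void)
{
	if (powered)
		return 0;
	powered = 1;		/* poke hardware, poll for ownership, ... */
	puts("device woken");
	return 0;
}

/* device_wake: the locked entry point for everyone else. */
static int device_wake(void)
{
	int err;

	pthread_mutex_lock(&lock);
	err = __device_wake();
	pthread_mutex_unlock(&lock);
	return err;
}

int main(void)
{
	return device_wake();
}
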
@@ -1368,6 +1382,7 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
 {
        struct mt7921_phy *phy = priv;
        struct mt7921_dev *dev = phy->dev;
+       struct ieee80211_hw *hw = mt76_hw(dev);
        int ret;
 
        if (dev->pm.enable)
@@ -1380,9 +1395,11 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
 
        if (dev->pm.enable) {
                vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+               ieee80211_hw_set(hw, CONNECTION_MONITOR);
                mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
        } else {
                vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
+               __clear_bit(IEEE80211_HW_CONNECTION_MONITOR, hw->flags);
                mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
        }
 }
index 49823d0..d76cf8f 100644 (file)
@@ -81,6 +81,7 @@ enum {
        MCU_EVENT_REG_ACCESS = 0x05,
        MCU_EVENT_LP_INFO = 0x07,
        MCU_EVENT_SCAN_DONE = 0x0d,
+       MCU_EVENT_TX_DONE = 0x0f,
        MCU_EVENT_BSS_ABSENCE  = 0x11,
        MCU_EVENT_BSS_BEACON_LOSS = 0x13,
        MCU_EVENT_CH_PRIVILEGE = 0x18,
@@ -197,18 +198,17 @@ struct sta_rec_sec {
        struct sec_key key[2];
 } __packed;
 
-enum mt7921_cipher_type {
-       MT_CIPHER_NONE,
-       MT_CIPHER_WEP40,
-       MT_CIPHER_WEP104,
-       MT_CIPHER_WEP128,
-       MT_CIPHER_TKIP,
-       MT_CIPHER_AES_CCMP,
-       MT_CIPHER_CCMP_256,
-       MT_CIPHER_GCMP,
-       MT_CIPHER_GCMP_256,
-       MT_CIPHER_WAPI,
-       MT_CIPHER_BIP_CMAC_128,
+enum mcu_cipher_type {
+       MCU_CIPHER_WEP40 = 1,
+       MCU_CIPHER_WEP104,
+       MCU_CIPHER_WEP128,
+       MCU_CIPHER_TKIP,
+       MCU_CIPHER_AES_CCMP,
+       MCU_CIPHER_CCMP_256,
+       MCU_CIPHER_GCMP,
+       MCU_CIPHER_GCMP_256,
+       MCU_CIPHER_WAPI,
+       MCU_CIPHER_BIP_CMAC_128,
 };
 
 enum {
@@ -254,86 +254,6 @@ struct mt7921_mcu_reg_event {
        __le32 val;
 } __packed;
 
-struct mt7921_mcu_tx_config {
-       u8 peer_addr[ETH_ALEN];
-       u8 sw;
-       u8 dis_rx_hdr_tran;
-
-       u8 aad_om;
-       u8 pfmu_idx;
-       __le16 partial_aid;
-
-       u8 ibf;
-       u8 ebf;
-       u8 is_ht;
-       u8 is_vht;
-
-       u8 mesh;
-       u8 baf_en;
-       u8 cf_ack;
-       u8 rdg_ba;
-
-       u8 rdg;
-       u8 pm;
-       u8 rts;
-       u8 smps;
-
-       u8 txop_ps;
-       u8 not_update_ipsm;
-       u8 skip_tx;
-       u8 ldpc;
-
-       u8 qos;
-       u8 from_ds;
-       u8 to_ds;
-       u8 dyn_bw;
-
-       u8 amdsu_cross_lg;
-       u8 check_per;
-       u8 gid_63;
-       u8 he;
-
-       u8 vht_ibf;
-       u8 vht_ebf;
-       u8 vht_ldpc;
-       u8 he_ldpc;
-} __packed;
-
-struct mt7921_mcu_sec_config {
-       u8 wpi_flag;
-       u8 rv;
-       u8 ikv;
-       u8 rkv;
-
-       u8 rcid;
-       u8 rca1;
-       u8 rca2;
-       u8 even_pn;
-
-       u8 key_id;
-       u8 muar_idx;
-       u8 cipher_suit;
-       u8 rsv[1];
-} __packed;
-
-struct mt7921_mcu_key_config {
-       u8 key[32];
-} __packed;
-
-struct mt7921_mcu_rate_info {
-       u8 mpdu_fail;
-       u8 mpdu_tx;
-       u8 rate_idx;
-       u8 rsv[1];
-       __le16 rate[8];
-} __packed;
-
-struct mt7921_mcu_ba_config {
-       u8 ba_en;
-       u8 rsv[3];
-       __le32 ba_winsize;
-} __packed;
-
 struct mt7921_mcu_ant_id_config {
        u8 ant_id[4];
 } __packed;
@@ -357,41 +277,6 @@ struct mt7921_mcu_peer_cap {
        u8 rsv[1];
 } __packed;
 
-struct mt7921_mcu_rx_cnt {
-       u8 rx_rcpi[4];
-       u8 rx_cc[4];
-       u8 rx_cc_sel;
-       u8 ce_rmsd;
-       u8 rsv[2];
-} __packed;
-
-struct mt7921_mcu_tx_cnt {
-       __le16 rate1_cnt;
-       __le16 rate1_fail_cnt;
-       __le16 rate2_cnt;
-       __le16 rate3_cnt;
-       __le16 cur_bw_tx_cnt;
-       __le16 cur_bw_tx_fail_cnt;
-       __le16 other_bw_tx_cnt;
-       __le16 other_bw_tx_fail_cnt;
-} __packed;
-
-struct mt7921_mcu_wlan_info_event {
-       struct mt7921_mcu_tx_config tx_config;
-       struct mt7921_mcu_sec_config sec_config;
-       struct mt7921_mcu_key_config key_config;
-       struct mt7921_mcu_rate_info rate_info;
-       struct mt7921_mcu_ba_config ba_config;
-       struct mt7921_mcu_peer_cap peer_cap;
-       struct mt7921_mcu_rx_cnt rx_cnt;
-       struct mt7921_mcu_tx_cnt tx_cnt;
-} __packed;
-
-struct mt7921_mcu_wlan_info {
-       __le32 wlan_idx;
-       struct mt7921_mcu_wlan_info_event event;
-} __packed;
-
 struct mt7921_txpwr_req {
        u8 ver;
        u8 action;
@@ -407,4 +292,31 @@ struct mt7921_txpwr_event {
        struct mt7921_txpwr txpwr;
 } __packed;
 
+struct mt7921_mcu_tx_done_event {
+       u8 pid;
+       u8 status;
+       u16 seq;
+
+       u8 wlan_idx;
+       u8 tx_cnt;
+       u16 tx_rate;
+
+       u8 flag;
+       u8 tid;
+       u8 rsp_rate;
+       u8 mcs;
+
+       u8 bw;
+       u8 tx_pwr;
+       u8 reason;
+       u8 rsv0[1];
+
+       u32 delay;
+       u32 timestamp;
+       u32 applied_flag;
+
+       u8 txs[28];
+
+       u8 rsv1[32];
+} __packed;
 #endif
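
Events such as mt7921_mcu_tx_done_event arrive from the MCU as little-endian byte streams and are parsed by casting skb->data to the packed struct; note that the multi-byte fields above are plain u16/u32 rather than __le16/__le32, so the cast is only byte-order-correct on little-endian hosts. A hypothetical standalone model of the cast-and-read pattern, with an explicit conversion helper showing the portable alternative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical two-field event; real MCU events are much larger. */
struct tx_done_event {
	uint8_t  wlan_idx;
	uint8_t  tx_cnt;
	uint16_t tx_rate;	/* little-endian on the wire */
} __attribute__((packed));

/* Read a wire-order (LE) u16 portably instead of trusting host order. */
static uint16_t le16_to_host(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	uint8_t wire[] = { 0x01, 0x03, 0x34, 0x12 };	/* idx 1, cnt 3, rate 0x1234 */
	struct tx_done_event ev;

	memcpy(&ev, wire, sizeof(ev));	/* the driver casts skb->data instead */
	printf("idx=%u cnt=%u rate=0x%04x\n",
	       (unsigned)ev.wlan_idx, (unsigned)ev.tx_cnt,
	       (unsigned)le16_to_host(&wire[2]));
	return 0;
}
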
index 59862ea..2d8bd6b 100644 (file)
@@ -92,6 +92,8 @@ struct mt7921_sta {
        unsigned long ampdu_state;
 
        struct mt7921_sta_key_conf bip;
+
+       unsigned long next_txs_ts;
 };
 
 DECLARE_EWMA(rssi, 10, 8);
@@ -100,6 +102,8 @@ struct mt7921_vif {
        struct mt76_vif mt76; /* must be first */
 
        struct mt7921_sta sta;
+       struct mt7921_sta *wep_sta;
+
        struct mt7921_phy *phy;
 
        struct ewma_rssi rssi;
@@ -156,6 +160,8 @@ struct mt7921_dev {
        u16 chainmask;
 
        struct work_struct reset_work;
+       bool hw_full_reset:1;
+       bool hw_init_done:1;
 
        struct list_head sta_poll_list;
        spinlock_t sta_poll_lock;
@@ -256,9 +262,9 @@ int mt7921_mcu_init(struct mt7921_dev *dev);
 int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif,
                       struct mt7921_sta *msta, struct ieee80211_key_conf *key,
                       enum set_key_cmd cmd);
-int mt7921_set_channel(struct mt7921_phy *phy);
-int mt7921_mcu_sta_add(struct mt7921_dev *dev, struct ieee80211_sta *sta,
-                      struct ieee80211_vif *vif, bool enable);
+int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
+                         struct ieee80211_vif *vif, bool enable,
+                         enum mt76_sta_info_state state);
 int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd);
 int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif);
 int mt7921_mcu_set_eeprom(struct mt7921_dev *dev);
@@ -318,7 +324,7 @@ static inline bool mt7921_dma_need_reinit(struct mt7921_dev *dev)
        return !mt76_get_field(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
 }
 
-void mt7921_mac_init(struct mt7921_dev *dev);
+int mt7921_mac_init(struct mt7921_dev *dev);
 bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask);
 void mt7921_mac_reset_counters(struct mt7921_phy *phy);
 void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
@@ -330,6 +336,8 @@ void mt7921_mac_fill_rx_vector(struct mt7921_dev *dev, struct sk_buff *skb);
 void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb);
 int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta);
+void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+                         struct ieee80211_sta *sta);
 void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
                           struct ieee80211_sta *sta);
 void mt7921_mac_work(struct work_struct *work);
@@ -352,7 +360,7 @@ void mt7921_stats_work(struct work_struct *work);
 void mt7921_txp_skb_unmap(struct mt76_dev *dev,
                          struct mt76_txwi_cache *txwi);
 void mt7921_set_stream_he_caps(struct mt7921_phy *phy);
-void mt7921_update_channel(struct mt76_dev *mdev);
+void mt7921_update_channel(struct mt76_phy *mphy);
 int mt7921_init_debugfs(struct mt7921_dev *dev);
 
 int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev,
@@ -362,12 +370,12 @@ int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev,
                         struct ieee80211_ampdu_params *params,
                         bool enable);
 void mt7921_scan_work(struct work_struct *work);
-u32 mt7921_get_wtbl_info(struct mt7921_dev *dev, u32 wlan_idx);
 int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif);
 int mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif,
                             bool enable);
 int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
                          bool enable);
+int __mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev);
 int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev);
 int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev);
 void mt7921_pm_wake_work(struct work_struct *work);
index fa02d93..c3905bc 100644 (file)
@@ -106,6 +106,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
                .rx_poll_complete = mt7921_rx_poll_complete,
                .sta_ps = mt7921_sta_ps,
                .sta_add = mt7921_mac_sta_add,
+               .sta_assoc = mt7921_mac_sta_assoc,
                .sta_remove = mt7921_mac_sta_remove,
                .update_survey = mt7921_update_channel,
        };
@@ -188,22 +189,29 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct mt76_dev *mdev = pci_get_drvdata(pdev);
        struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+       struct mt76_connac_pm *pm = &dev->pm;
        bool hif_suspend;
        int i, err;
 
-       err = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
+       pm->suspended = true;
+       cancel_delayed_work_sync(&pm->ps_work);
+       cancel_work_sync(&pm->wake_work);
+
+       err = mt7921_mcu_drv_pmctrl(dev);
        if (err < 0)
-               return err;
+               goto restore_suspend;
 
        hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state);
        if (hif_suspend) {
                err = mt76_connac_mcu_set_hif_suspend(mdev, true);
                if (err)
-                       return err;
+                       goto restore_suspend;
        }
 
-       if (!dev->pm.enable)
-               mt76_connac_mcu_set_deep_sleep(&dev->mt76, true);
+       /* always enable deep sleep during suspend to reduce
+        * power consumption
+        */
+       mt76_connac_mcu_set_deep_sleep(&dev->mt76, true);
 
        napi_disable(&mdev->tx_napi);
        mt76_worker_disable(&mdev->tx_worker);
@@ -231,27 +239,30 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 
        err = mt7921_mcu_fw_pmctrl(dev);
        if (err)
-               goto restore;
+               goto restore_napi;
 
        pci_save_state(pdev);
        err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
        if (err)
-               goto restore;
+               goto restore_napi;
 
        return 0;
 
-restore:
+restore_napi:
        mt76_for_each_q_rx(mdev, i) {
                napi_enable(&mdev->napi[i]);
        }
        napi_enable(&mdev->tx_napi);
 
-       if (!dev->pm.enable)
+       if (!pm->ds_enable)
                mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
 
        if (hif_suspend)
                mt76_connac_mcu_set_hif_suspend(mdev, false);
 
+restore_suspend:
+       pm->suspended = false;
+
        return err;
 }
 
@@ -259,8 +270,10 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
 {
        struct mt76_dev *mdev = pci_get_drvdata(pdev);
        struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+       struct mt76_connac_pm *pm = &dev->pm;
        int i, err;
 
+       pm->suspended = false;
        err = pci_set_power_state(pdev, PCI_D0);
        if (err)
                return err;
@@ -291,7 +304,8 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
        napi_enable(&mdev->tx_napi);
        napi_schedule(&mdev->tx_napi);
 
-       if (!dev->pm.enable)
+       /* restore previous deep sleep setting */
+       if (!pm->ds_enable)
                mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
 
        if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state))
index a18d289..783a156 100644 (file)
@@ -184,9 +184,6 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
        if (!q->queued)
                wake_up(&dev->tx_wait);
 
-       if (!mcu)
-               mt76_txq_schedule(&dev->phy, q->qid);
-
        return nframes;
 }
 
@@ -195,19 +192,28 @@ static void mt76s_status_worker(struct mt76_worker *w)
        struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
                                              status_worker);
        struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
+       bool resched = false;
        int i, nframes;
 
        do {
+               int ndata_frames = 0;
+
                nframes = mt76s_process_tx_queue(dev, dev->q_mcu[MT_MCUQ_WM]);
 
                for (i = 0; i <= MT_TXQ_PSD; i++)
-                       nframes += mt76s_process_tx_queue(dev,
-                                                         dev->phy.q_tx[i]);
+                       ndata_frames += mt76s_process_tx_queue(dev,
+                                                              dev->phy.q_tx[i]);
+               nframes += ndata_frames;
+               if (ndata_frames > 0)
+                       resched = true;
 
                if (dev->drv->tx_status_data &&
                    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
                        queue_work(dev->wq, &dev->sdio.stat_work);
        } while (nframes > 0);
+
+       if (resched)
+               mt76_worker_schedule(&dev->sdio.txrx_worker);
 }
 
 static void mt76s_tx_status_data(struct work_struct *work)
@@ -256,6 +262,7 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 
        q->entry[q->head].skb = tx_info.skb;
        q->entry[q->head].buf_sz = len;
+       q->entry[q->head].wcid = 0xffff;
 
        smp_wmb();
 
index 001d0ba..f73ffbd 100644 (file)
@@ -88,17 +88,8 @@ static void
 mt76_testmode_free_skb(struct mt76_phy *phy)
 {
        struct mt76_testmode_data *td = &phy->test;
-       struct sk_buff *skb = td->tx_skb;
-
-       if (!skb)
-               return;
 
-       if (skb_has_frag_list(skb)) {
-               kfree_skb_list(skb_shinfo(skb)->frag_list);
-               skb_shinfo(skb)->frag_list = NULL;
-       }
-
-       dev_kfree_skb(skb);
+       dev_kfree_skb(td->tx_skb);
        td->tx_skb = NULL;
 }
 
@@ -158,19 +149,18 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
                        frag_len = MT_TXP_MAX_LEN;
 
                frag = alloc_skb(frag_len, GFP_KERNEL);
-               if (!frag)
+               if (!frag) {
+                       mt76_testmode_free_skb(phy);
+                       dev_kfree_skb(head);
                        return -ENOMEM;
+               }
 
                __skb_put_zero(frag, frag_len);
                head->len += frag->len;
                head->data_len += frag->len;
 
-               if (*frag_tail) {
-                       (*frag_tail)->next = frag;
-                       frag_tail = &frag;
-               } else {
-                       *frag_tail = frag;
-               }
+               *frag_tail = frag;
+               frag_tail = &(*frag_tail)->next;
        }
 
        mt76_testmode_free_skb(phy);
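
The old loop advanced frag_tail to the address of the loop-local variable (frag_tail = &frag), so from the third fragment onward it kept rewriting the same pointer instead of growing the chain; the fix restores the standard pointer-to-pointer append, where frag_tail always holds the address of the next-pointer slot to fill. A standalone illustration of the corrected idiom:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL;
	struct node **tail = &head;	/* address of the next pointer to fill */
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = calloc(1, sizeof(*n));

		if (!n)
			return 1;
		n->val = i;
		*tail = n;		/* link the node in... */
		tail = &n->next;	/* ...then track the new tail slot */
	}

	while (head) {			/* prints 0 1 2 in insertion order */
		struct node *n = head;

		printf("%d\n", n->val);
		head = n->next;
		free(n);
	}
	return 0;
}
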
@@ -531,6 +521,14 @@ mt76_testmode_dump_stats(struct mt76_phy *phy, struct sk_buff *msg)
        u64 rx_fcs_error = 0;
        int i;
 
+       if (dev->test_ops->dump_stats) {
+               int ret;
+
+               ret = dev->test_ops->dump_stats(phy, msg);
+               if (ret)
+                       return ret;
+       }
+
        for (i = 0; i < ARRAY_SIZE(td->rx_stats.packets); i++) {
                rx_packets += td->rx_stats.packets[i];
                rx_fcs_error += td->rx_stats.fcs_error[i];
@@ -545,9 +543,6 @@ mt76_testmode_dump_stats(struct mt76_phy *phy, struct sk_buff *msg)
                              MT76_TM_STATS_ATTR_PAD))
                return -EMSGSIZE;
 
-       if (dev->test_ops->dump_stats)
-               return dev->test_ops->dump_stats(phy, msg);
-
        return 0;
 }
 
index 53ea8de..f0f7a91 100644 (file)
@@ -54,11 +54,23 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
 
        spin_unlock_bh(&dev->status_list.lock);
 
+       rcu_read_lock();
        while ((skb = __skb_dequeue(list)) != NULL) {
+               struct ieee80211_tx_status status = {
+                       .skb = skb,
+                       .info = IEEE80211_SKB_CB(skb),
+               };
+               struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
+               struct mt76_wcid *wcid;
+
+               wcid = rcu_dereference(dev->wcid[cb->wcid]);
+               if (wcid)
+                       status.sta = wcid_to_sta(wcid);
+
                hw = mt76_tx_status_get_hw(dev, skb);
-               ieee80211_tx_status(hw, skb);
+               ieee80211_tx_status_ext(hw, &status);
        }
-
+       rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
 
@@ -80,7 +92,7 @@ __mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
 
        /* Tx status can be unreliable. If it fails, mark the frame as ACKed */
        if (flags & MT_TX_CB_TXS_FAILED) {
-               ieee80211_tx_info_clear_status(info);
+               info->status.rates[0].count = 0;
                info->status.rates[0].idx = -1;
                info->flags |= IEEE80211_TX_STAT_ACK;
        }
@@ -117,12 +129,7 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
        spin_lock_bh(&dev->status_list.lock);
 
        memset(cb, 0, sizeof(*cb));
-       wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
-       if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
-           wcid->packet_id == MT_PACKET_ID_NO_SKB)
-               wcid->packet_id = MT_PACKET_ID_FIRST;
-
-       pid = wcid->packet_id;
+       pid = mt76_get_next_pkt_id(wcid);
        cb->wcid = wcid->idx;
        cb->pktid = pid;
        cb->jiffies = jiffies;
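
mt76_get_next_pkt_id() simply factors out the increment logic that used to be open-coded here: advance wcid->packet_id modulo the ID space and skip the reserved no-ACK/no-skb values. A standalone model of such a wrapping allocator, with hypothetical constants in place of the MT_PACKET_ID_* definitions:

#include <stdint.h>
#include <stdio.h>

#define PKT_ID_MASK	0x7f	/* hypothetical 7-bit ID space */
#define PKT_ID_NO_ACK	0x00	/* reserved: frame wants no status */
#define PKT_ID_NO_SKB	0x01	/* reserved: status without an skb */
#define PKT_ID_FIRST	0x02	/* first allocatable ID */

static uint8_t next_pkt_id(uint8_t *cur)
{
	*cur = (*cur + 1) & PKT_ID_MASK;
	if (*cur == PKT_ID_NO_ACK || *cur == PKT_ID_NO_SKB)
		*cur = PKT_ID_FIRST;	/* skip the reserved values */
	return *cur;
}

int main(void)
{
	uint8_t id = PKT_ID_MASK;	/* force a wrap on the first call */
	int i;

	for (i = 0; i < 4; i++)
		printf("pid=%u\n", (unsigned)next_pkt_id(&id));	/* 2, 3, 4, 5 */
	return 0;
}
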
@@ -173,36 +180,37 @@ mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
 EXPORT_SYMBOL_GPL(mt76_tx_status_check);
 
 static void
-mt76_tx_check_non_aql(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
+mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
+                     struct sk_buff *skb)
 {
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct mt76_wcid *wcid;
        int pending;
 
-       if (info->tx_time_est)
-               return;
-
-       if (wcid_idx >= ARRAY_SIZE(dev->wcid))
+       if (!wcid || info->tx_time_est)
                return;
 
-       rcu_read_lock();
-
-       wcid = rcu_dereference(dev->wcid[wcid_idx]);
-       if (wcid) {
-               pending = atomic_dec_return(&wcid->non_aql_packets);
-               if (pending < 0)
-                       atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
-       }
-
-       rcu_read_unlock();
+       pending = atomic_dec_return(&wcid->non_aql_packets);
+       if (pending < 0)
+               atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
 }
 
-void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
+void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
+                           struct list_head *free_list)
 {
+       struct ieee80211_tx_status status = {
+               .skb = skb,
+               .free_list = free_list,
+       };
+       struct mt76_wcid *wcid = NULL;
        struct ieee80211_hw *hw;
        struct sk_buff_head list;
 
-       mt76_tx_check_non_aql(dev, wcid_idx, skb);
+       rcu_read_lock();
+
+       if (wcid_idx < ARRAY_SIZE(dev->wcid))
+               wcid = rcu_dereference(dev->wcid[wcid_idx]);
+
+       mt76_tx_check_non_aql(dev, wcid, skb);
 
 #ifdef CONFIG_NL80211_TESTMODE
        if (mt76_is_testmode_skb(dev, skb, &hw)) {
@@ -214,21 +222,25 @@ void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *sk
                        wake_up(&dev->tx_wait);
 
                dev_kfree_skb_any(skb);
-               return;
+               goto out;
        }
 #endif
 
        if (!skb->prev) {
                hw = mt76_tx_status_get_hw(dev, skb);
-               ieee80211_free_txskb(hw, skb);
-               return;
+               status.sta = wcid_to_sta(wcid);
+               ieee80211_tx_status_ext(hw, &status);
+               goto out;
        }
 
        mt76_tx_status_lock(dev, &list);
        __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
        mt76_tx_status_unlock(dev, &list);
+
+out:
+       rcu_read_unlock();
 }
-EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);
+EXPORT_SYMBOL_GPL(__mt76_tx_complete_skb);
 
 static int
 __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
@@ -244,11 +256,15 @@ __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
 
        non_aql = !info->tx_time_est;
        idx = dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
-       if (idx < 0 || !sta || !non_aql)
+       if (idx < 0 || !sta)
                return idx;
 
        wcid = (struct mt76_wcid *)sta->drv_priv;
        q->entry[idx].wcid = wcid->idx;
+
+       if (!non_aql)
+               return idx;
+
        pending = atomic_inc_return(&wcid->non_aql_packets);
        if (stop && pending >= MT_MAX_NON_AQL_PKT)
                *stop = true;
@@ -285,7 +301,7 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
                skb_set_queue_mapping(skb, qid);
        }
 
-       if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
+       if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
                ieee80211_get_tx_rates(info->control.vif, sta, skb,
                                       info->control.rates, 1);
 
index 30bc54e..1e9f60b 100644 (file)
@@ -925,6 +925,7 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 
        q->head = (q->head + 1) % q->ndesc;
        q->entry[idx].skb = tx_info.skb;
+       q->entry[idx].wcid = 0xffff;
        q->queued++;
 
        return idx;
index 6bcc4a1..cc77204 100644 (file)
@@ -26,6 +26,7 @@ static const struct usb_device_id mt7601u_device_table[] = {
        { USB_DEVICE(0x2717, 0x4106) },
        { USB_DEVICE(0x2955, 0x0001) },
        { USB_DEVICE(0x2955, 0x1001) },
+       { USB_DEVICE(0x2955, 0x1003) },
        { USB_DEVICE(0x2a5f, 0x1000) },
        { USB_DEVICE(0x7392, 0x7710) },
        { 0, }
index d1a566c..0173577 100644 (file)
@@ -853,15 +853,10 @@ struct rtl8192eu_efuse {
        u8 usb_optional_function;
        u8 res9[2];
        u8 mac_addr[ETH_ALEN];          /* 0xd7 */
-       u8 res10[2];
-       u8 vendor_name[7];
-       u8 res11[2];
-       u8 device_name[0x0b];           /* 0xe8 */
-       u8 res12[2];
-       u8 serial[0x0b];                /* 0xf5 */
-       u8 res13[0x30];
+       u8 device_info[80];
+       u8 res11[3];
        u8 unknown[0x0d];               /* 0x130 */
-       u8 res14[0xc3];
+       u8 res12[0xc3];
 };
 
 struct rtl8xxxu_reg8val {
index cfe2dfd..b06508d 100644 (file)
@@ -554,9 +554,43 @@ rtl8192e_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
        }
 }
 
+static void rtl8192eu_log_next_device_info(struct rtl8xxxu_priv *priv,
+                                          char *record_name,
+                                          char *device_info,
+                                          unsigned int *record_offset)
+{
+       char *record = device_info + *record_offset;
+
+       /* A record is [ total length | 0x03 | value ] */
+       unsigned char l = record[0];
+
+       /*
+        * The whole device_info section seems to be 80 bytes; make sure
+        * we don't read past its end.
+        */
+       if (*record_offset + l > 80) {
+               dev_warn(&priv->udev->dev,
+                        "invalid record length %d while parsing \"%s\" at offset %u.\n",
+                        l, record_name, *record_offset);
+               return;
+       }
+
+       if (l >= 2) {
+               char value[80];
+
+               memcpy(value, &record[2], l - 2);
+               value[l - 2] = '\0';
+               dev_info(&priv->udev->dev, "%s: %s\n", record_name, value);
+               *record_offset = *record_offset + l;
+       } else {
+               dev_info(&priv->udev->dev, "%s not available.\n", record_name);
+       }
+}
+
 static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
 {
        struct rtl8192eu_efuse *efuse = &priv->efuse_wifi.efuse8192eu;
+       unsigned int record_offset;
        int i;
 
        if (efuse->rtl_id != cpu_to_le16(0x8129))
@@ -604,12 +638,25 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
        priv->has_xtalk = 1;
        priv->xtalk = priv->efuse_wifi.efuse8192eu.xtal_k & 0x3f;
 
-       dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
-       dev_info(&priv->udev->dev, "Product: %.11s\n", efuse->device_name);
-       if (memchr_inv(efuse->serial, 0xff, 11))
-               dev_info(&priv->udev->dev, "Serial: %.11s\n", efuse->serial);
-       else
-               dev_info(&priv->udev->dev, "Serial not available.\n");
+       /*
+        * The device_info section seems to be laid out as records of
+        * [ total length | 0x03 | value ]:
+        * - vendor length + 2
+        * - 0x03
+        * - vendor string (not null terminated)
+        * - product length + 2
+        * - 0x03
+        * - product string (not null terminated)
+        * Then there are one or two 0x00 bytes on all four devices I own
+        * or have found dumped online.
+        * As a previous version of the code handled an optional serial
+        * string, I now assume there may be a third record if the
+        * length is not zero.
+        */
+       record_offset = 0;
+       rtl8192eu_log_next_device_info(priv, "Vendor", efuse->device_info, &record_offset);
+       rtl8192eu_log_next_device_info(priv, "Product", efuse->device_info, &record_offset);
+       rtl8192eu_log_next_device_info(priv, "Serial", efuse->device_info, &record_offset);
 
        if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
                unsigned char *raw = priv->efuse_wifi.raw;
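
Given the record layout described in the comment above, the parse is a cursor walk: read the length byte, bounds-check it against the 80-byte section, copy out the value, advance. A standalone sketch of that walk over a fabricated device_info blob:

#include <stdio.h>
#include <string.h>

#define INFO_LEN 80

/* Each record is [ total length | 0x03 | value ]; length covers all bytes. */
static void log_next_record(const char *name, const unsigned char *info,
			    unsigned int *off)
{
	unsigned char l = info[*off];

	if (*off + l > INFO_LEN) {
		printf("%s: invalid record length %u at offset %u\n",
		       name, (unsigned)l, *off);
		return;
	}
	if (l >= 2) {
		char value[INFO_LEN];

		memcpy(value, &info[*off + 2], l - 2);
		value[l - 2] = '\0';
		printf("%s: %s\n", name, value);
		*off += l;
	} else {
		printf("%s not available.\n", name);
	}
}

int main(void)
{
	unsigned char info[INFO_LEN] = {
		9, 0x03, 'R', 'e', 'a', 'l', 't', 'e', 'k',	/* vendor */
		6, 0x03, '8', '1', '9', '2',			/* product */
		0,						/* no serial */
	};
	unsigned int off = 0;

	log_next_record("Vendor", info, &off);
	log_next_record("Product", info, &off);
	log_next_record("Serial", info, &off);
	return 0;
}
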
index 9ff09cf..ac1061c 100644 (file)
@@ -5554,6 +5554,11 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
        urb_len = skb->len;
        pkt_cnt = 0;
 
+       if (urb_len < sizeof(struct rtl8xxxu_rxdesc16)) {
+               kfree_skb(skb);
+               return RX_TYPE_ERROR;
+       }
+
        do {
                rx_desc = (struct rtl8xxxu_rxdesc16 *)skb->data;
                _rx_desc_le = (__le32 *)skb->data;
@@ -5581,7 +5586,7 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
                 * at least cover the rx descriptor
                 */
                if (pkt_cnt > 1 &&
-                   urb_len > (pkt_offset + sizeof(struct rtl8xxxu_rxdesc16)))
+                   urb_len >= (pkt_offset + sizeof(struct rtl8xxxu_rxdesc16)))
                        next_skb = skb_clone(skb, GFP_ATOMIC);
 
                rx_status = IEEE80211_SKB_RXCB(skb);
@@ -5627,7 +5632,9 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
 
                pkt_cnt--;
                urb_len -= pkt_offset;
-       } while (skb && urb_len > 0 && pkt_cnt > 0);
+               next_skb = NULL;
+       } while (skb && pkt_cnt > 0 &&
+                urb_len >= sizeof(struct rtl8xxxu_rxdesc16));
 
        return RX_TYPE_DATA_PKT;
 }
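
Both hunks harden the aggregated-RX parser the same way: never dereference a descriptor unless the remaining URB length can hold one, and only clone a next skb when the remainder fully covers another descriptor (hence >= instead of >). A standalone model of the loop-guard pattern, using a hypothetical two-byte descriptor:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 2-byte descriptor: payload length, then flags. */
struct rxdesc {
	uint8_t pktlen;
	uint8_t flags;
};

static void parse(const uint8_t *buf, size_t len)
{
	while (len >= sizeof(struct rxdesc)) {	/* guard before every read */
		const struct rxdesc *d = (const struct rxdesc *)buf;
		size_t pkt = sizeof(*d) + d->pktlen;

		if (pkt > len) {	/* descriptor claims more than we have */
			puts("truncated packet, stop");
			return;
		}
		printf("packet with %u payload bytes\n", (unsigned)d->pktlen);
		buf += pkt;
		len -= pkt;
	}
}

int main(void)
{
	uint8_t buf[] = {
		2, 0, 0xaa, 0xbb,	/* complete packet */
		5, 0, 0xcc,		/* claims 5 bytes, only 1 present */
	};

	parse(buf, sizeof(buf));
	return 0;
}
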
index 68ec009..76dd881 100644 (file)
@@ -2574,7 +2574,7 @@ static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
                        RTPRINT(rtlpriv, FINIT, INIT_IQK,
                                "path-B / 2.4G LCK\n");
                }
-               memset(&curvecount_val[0], 0, CV_CURVE_CNT * 2);
+               memset(curvecount_val, 0, sizeof(curvecount_val));
                /* Set LC calibration off */
                rtl_set_rfreg(hw, (enum radio_path)index, RF_CHNLBW,
                              0x08000, 0x0);
index cedbf38..2551e22 100644 (file)
@@ -591,8 +591,10 @@ void rtw_coex_info_response(struct rtw_dev *rtwdev, struct sk_buff *skb)
        struct rtw_coex *coex = &rtwdev->coex;
        u8 *payload = get_payload_from_coex_resp(skb);
 
-       if (payload[0] != COEX_RESP_ACK_BY_WL_FW)
+       if (payload[0] != COEX_RESP_ACK_BY_WL_FW) {
+               dev_kfree_skb_any(skb);
                return;
+       }
 
        skb_queue_tail(&coex->queue, skb);
        wake_up(&coex->wait);
@@ -630,20 +632,16 @@ static bool rtw_coex_get_bt_scan_type(struct rtw_dev *rtwdev, u8 *scan_type)
        struct rtw_coex_info_req req = {0};
        struct sk_buff *skb;
        u8 *payload;
-       bool ret = false;
 
        req.op_code = BT_MP_INFO_OP_SCAN_TYPE;
        skb = rtw_coex_info_request(rtwdev, &req);
        if (!skb)
-               goto out;
+               return false;
 
        payload = get_payload_from_coex_resp(skb);
        *scan_type = GET_COEX_RESP_BT_SCAN_TYPE(payload);
        dev_kfree_skb_any(skb);
-       ret = true;
-
-out:
-       return ret;
+       return true;
 }
 
 static bool rtw_coex_set_lna_constrain_level(struct rtw_dev *rtwdev,
@@ -651,19 +649,15 @@ static bool rtw_coex_set_lna_constrain_level(struct rtw_dev *rtwdev,
 {
        struct rtw_coex_info_req req = {0};
        struct sk_buff *skb;
-       bool ret = false;
 
        req.op_code = BT_MP_INFO_OP_LNA_CONSTRAINT;
        req.para1 = lna_constrain_level;
        skb = rtw_coex_info_request(rtwdev, &req);
        if (!skb)
-               goto out;
+               return false;
 
        dev_kfree_skb_any(skb);
-       ret = true;
-
-out:
-       return ret;
+       return true;
 }
 
 #define case_BTSTATUS(src) \
@@ -3523,6 +3517,7 @@ static bool rtw_coex_get_bt_reg(struct rtw_dev *rtwdev,
 
        payload = get_payload_from_coex_resp(skb);
        *val = GET_COEX_RESP_BT_REG_VAL(payload);
+       dev_kfree_skb_any(skb);
 
        return true;
 }
@@ -3533,19 +3528,17 @@ static bool rtw_coex_get_bt_patch_version(struct rtw_dev *rtwdev,
        struct rtw_coex_info_req req = {0};
        struct sk_buff *skb;
        u8 *payload;
-       bool ret = false;
 
        req.op_code = BT_MP_INFO_OP_PATCH_VER;
        skb = rtw_coex_info_request(rtwdev, &req);
        if (!skb)
-               goto out;
+               return false;
 
        payload = get_payload_from_coex_resp(skb);
        *patch_version = GET_COEX_RESP_BT_PATCH_VER(payload);
-       ret = true;
+       dev_kfree_skb_any(skb);
 
-out:
-       return ret;
+       return true;
 }
 
 static bool rtw_coex_get_bt_supported_version(struct rtw_dev *rtwdev,
@@ -3554,19 +3547,17 @@ static bool rtw_coex_get_bt_supported_version(struct rtw_dev *rtwdev,
        struct rtw_coex_info_req req = {0};
        struct sk_buff *skb;
        u8 *payload;
-       bool ret = false;
 
        req.op_code = BT_MP_INFO_OP_SUPP_VER;
        skb = rtw_coex_info_request(rtwdev, &req);
        if (!skb)
-               goto out;
+               return false;
 
        payload = get_payload_from_coex_resp(skb);
        *supported_version = GET_COEX_RESP_BT_SUPP_VER(payload);
-       ret = true;
+       dev_kfree_skb_any(skb);
 
-out:
-       return ret;
+       return true;
 }
 
 static bool rtw_coex_get_bt_supported_feature(struct rtw_dev *rtwdev,
@@ -3575,19 +3566,17 @@ static bool rtw_coex_get_bt_supported_feature(struct rtw_dev *rtwdev,
        struct rtw_coex_info_req req = {0};
        struct sk_buff *skb;
        u8 *payload;
-       bool ret = false;
 
        req.op_code = BT_MP_INFO_OP_SUPP_FEAT;
        skb = rtw_coex_info_request(rtwdev, &req);
        if (!skb)
-               goto out;
+               return false;
 
        payload = get_payload_from_coex_resp(skb);
        *supported_feature = GET_COEX_RESP_BT_SUPP_FEAT(payload);
-       ret = true;
+       dev_kfree_skb_any(skb);
 
-out:
-       return ret;
+       return true;
 }
 
 struct rtw_coex_sta_stat_iter_data {
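
The common thread in these coex fixes is ownership: rtw_coex_info_request() hands the response skb to the caller, so every helper must free it on every exit path, and rtw_coex_info_response() must drop responses it will not queue. A standalone model of the consume-on-all-paths contract, with malloc standing in for skb allocation:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for rtw_coex_info_request(): caller owns the returned buffer. */
static unsigned char *info_request(unsigned char op_code)
{
	unsigned char *resp = malloc(8);

	if (resp)
		memset(resp, op_code, 8);
	return resp;
}

static bool get_scan_type(unsigned char *scan_type)
{
	unsigned char *resp = info_request(0x05);

	if (!resp)
		return false;	/* nothing allocated, nothing to free */

	*scan_type = resp[0];
	free(resp);		/* consume before every successful return */
	return true;
}

int main(void)
{
	unsigned char type;

	if (get_scan_type(&type))
		printf("scan type %u\n", type);
	return 0;
}
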
index 18ab472..dfd52cf 100644 (file)
@@ -11,6 +11,7 @@
 #include "debug.h"
 #include "phy.h"
 #include "reg.h"
+#include "ps.h"
 
 #ifdef CONFIG_RTW88_DEBUGFS
 
@@ -847,7 +848,13 @@ static ssize_t rtw_debugfs_set_fw_crash(struct file *filp,
        if (!input)
                return -EINVAL;
 
+       if (test_bit(RTW_FLAG_RESTARTING, rtwdev->flags))
+               return -EINPROGRESS;
+
+       mutex_lock(&rtwdev->mutex);
+       rtw_leave_lps_deep(rtwdev);
        rtw_write8(rtwdev, REG_HRCV_MSG, 1);
+       mutex_unlock(&rtwdev->mutex);
 
        return count;
 }
index c8efd19..0dd3f9a 100644 (file)
@@ -20,6 +20,7 @@ enum rtw_debug_mask {
        RTW_DBG_BF              = 0x00000800,
        RTW_DBG_WOW             = 0x00001000,
        RTW_DBG_CFO             = 0x00002000,
+       RTW_DBG_PATH_DIV        = 0x00004000,
 
        RTW_DBG_ALL             = 0xffffffff
 };
index ea2cd4d..3bfa5ec 100644 (file)
@@ -127,6 +127,62 @@ static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload,
        rtw_iterate_stas_atomic(rtwdev, rtw_fw_ra_report_iter, &ra_data);
 }
 
+struct rtw_beacon_filter_iter_data {
+       struct rtw_dev *rtwdev;
+       u8 *payload;
+};
+
+static void rtw_fw_bcn_filter_notify_vif_iter(void *data, u8 *mac,
+                                             struct ieee80211_vif *vif)
+{
+       struct rtw_beacon_filter_iter_data *iter_data = data;
+       struct rtw_dev *rtwdev = iter_data->rtwdev;
+       u8 *payload = iter_data->payload;
+       u8 type = GET_BCN_FILTER_NOTIFY_TYPE(payload);
+       u8 event = GET_BCN_FILTER_NOTIFY_EVENT(payload);
+       s8 sig = (s8)GET_BCN_FILTER_NOTIFY_RSSI(payload);
+
+       switch (type) {
+       case BCN_FILTER_NOTIFY_SIGNAL_CHANGE:
+               event = event ? NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH :
+                       NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
+               ieee80211_cqm_rssi_notify(vif, event, sig, GFP_KERNEL);
+               break;
+       case BCN_FILTER_CONNECTION_LOSS:
+               ieee80211_connection_loss(vif);
+               break;
+       case BCN_FILTER_CONNECTED:
+               rtwdev->beacon_loss = false;
+               break;
+       case BCN_FILTER_NOTIFY_BEACON_LOSS:
+               rtwdev->beacon_loss = true;
+               rtw_leave_lps(rtwdev);
+               break;
+       }
+}
+
+static void rtw_fw_bcn_filter_notify(struct rtw_dev *rtwdev, u8 *payload,
+                                    u8 length)
+{
+       struct rtw_beacon_filter_iter_data dev_iter_data;
+
+       dev_iter_data.rtwdev = rtwdev;
+       dev_iter_data.payload = payload;
+       rtw_iterate_vifs(rtwdev, rtw_fw_bcn_filter_notify_vif_iter,
+                        &dev_iter_data);
+}
+
+static void rtw_fw_scan_result(struct rtw_dev *rtwdev, u8 *payload,
+                              u8 length)
+{
+       struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+       dm_info->scan_density = payload[0];
+
+       rtw_dbg(rtwdev, RTW_DBG_FW, "scan.density = %x\n",
+               dm_info->scan_density);
+}
+
 void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
 {
        struct rtw_c2h_cmd *c2h;
@@ -152,6 +208,9 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
        case C2H_WLAN_INFO:
                rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len);
                break;
+       case C2H_BCN_FILTER_NOTIFY:
+               rtw_fw_bcn_filter_notify(rtwdev, c2h->payload, len);
+               break;
        case C2H_HALMAC:
                rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
                break;
@@ -186,6 +245,12 @@ void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
                break;
        case C2H_WLAN_RFON:
                complete(&rtwdev->lps_leave_check);
+               dev_kfree_skb_any(skb);
+               break;
+       case C2H_SCAN_RESULT:
+               complete(&rtwdev->fw_scan_density);
+               rtw_fw_scan_result(rtwdev, c2h->payload, len);
+               dev_kfree_skb_any(skb);
                break;
        default:
                /* pass offset for further operation */
@@ -527,6 +592,45 @@ void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev)
        rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
 }
 
+void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
+                                struct ieee80211_vif *vif)
+{
+       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+       struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid);
+       static const u8 rssi_min = 0, rssi_max = 100, rssi_offset = 100;
+       struct rtw_sta_info *si =
+               sta ? (struct rtw_sta_info *)sta->drv_priv : NULL;
+       s32 threshold = bss_conf->cqm_rssi_thold + rssi_offset;
+       u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+       if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER) || !si)
+               return;
+
+       if (!connect) {
+               SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
+               SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
+               rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+
+               return;
+       }
+       SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P0);
+       ether_addr_copy(&h2c_pkt[1], bss_conf->bssid);
+       rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+
+       memset(h2c_pkt, 0, sizeof(h2c_pkt));
+       threshold = clamp_t(s32, threshold, rssi_min, rssi_max);
+       SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
+       SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
+       SET_BCN_FILTER_OFFLOAD_P1_OFFLOAD_MODE(h2c_pkt,
+                                              BCN_FILTER_OFFLOAD_MODE_DEFAULT);
+       SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, (u8)threshold);
+       SET_BCN_FILTER_OFFLOAD_P1_BCN_LOSS_CNT(h2c_pkt, BCN_LOSS_CNT);
+       SET_BCN_FILTER_OFFLOAD_P1_MACID(h2c_pkt, si->mac_id);
+       SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, bss_conf->cqm_rssi_hyst);
+       SET_BCN_FILTER_OFFLOAD_P1_BCN_INTERVAL(h2c_pkt, bss_conf->beacon_int);
+       rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
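
All of the SET_BCN_FILTER_OFFLOAD_* macros used above reduce to one primitive: replace a GENMASK-delimited bit range inside a little-endian 32-bit word of the H2C packet. A standalone model of that field-packing helper, where the GENMASK32 macro and the GCC/Clang __builtin_ctz are stand-ins for the kernel's GENMASK and le32p_replace_bits(), applied to a hypothetical field layout:

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

/* Replace the bits selected by 'mask' in *word with 'val'. */
static void replace_bits(uint32_t *word, uint32_t val, uint32_t mask)
{
	int shift = __builtin_ctz(mask);	/* lowest set bit of the mask */

	*word = (*word & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t h2c = 0;

	replace_bits(&h2c, 0x57, GENMASK32(7, 0));	/* cmd id, bits 7:0 */
	replace_bits(&h2c, 1, 1u << 16);		/* enable, bit 16 */
	replace_bits(&h2c, 80, GENMASK32(31, 24));	/* threshold, 31:24 */
	printf("h2c word = 0x%08x\n", h2c);		/* 0x50010057 */
	return 0;
}
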
 void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev)
 {
        struct rtw_lps_conf *conf = &rtwdev->lps_conf;
@@ -1613,3 +1717,13 @@ void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable)
 
        rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
 }
+
+void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
+{
+       u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+       SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SCAN);
+       SET_SCAN_START(h2c_pkt, start);
+
+       rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
index 7c5b1d7..a8a7162 100644 (file)
 #define DLFW_BLK_SIZE_LEGACY           4
 #define FW_START_ADDR_LEGACY           0x1000
 
+#define BCN_LOSS_CNT                   10
+#define BCN_FILTER_NOTIFY_SIGNAL_CHANGE        0
+#define BCN_FILTER_CONNECTION_LOSS     1
+#define BCN_FILTER_CONNECTED           2
+#define BCN_FILTER_NOTIFY_BEACON_LOSS  3
+
+#define SCAN_NOTIFY_TIMEOUT  msecs_to_jiffies(10)
+
 enum rtw_c2h_cmd_id {
        C2H_CCX_TX_RPT = 0x03,
        C2H_BT_INFO = 0x09,
@@ -32,6 +40,8 @@ enum rtw_c2h_cmd_id {
        C2H_HW_FEATURE_REPORT = 0x19,
        C2H_WLAN_INFO = 0x27,
        C2H_WLAN_RFON = 0x32,
+       C2H_BCN_FILTER_NOTIFY = 0x36,
+       C2H_SCAN_RESULT = 0x38,
        C2H_HW_FEATURE_DUMP = 0xfd,
        C2H_HALMAC = 0xff,
 };
@@ -78,9 +88,20 @@ enum rtw_fw_feature {
        FW_FEATURE_LPS_C2H = BIT(1),
        FW_FEATURE_LCLK = BIT(2),
        FW_FEATURE_PG = BIT(3),
+       FW_FEATURE_BCN_FILTER = BIT(5),
+       FW_FEATURE_NOTIFY_SCAN = BIT(6),
        FW_FEATURE_MAX = BIT(31),
 };
 
+enum rtw_beacon_filter_offload_mode {
+       BCN_FILTER_OFFLOAD_MODE_0 = 0,
+       BCN_FILTER_OFFLOAD_MODE_1,
+       BCN_FILTER_OFFLOAD_MODE_2,
+       BCN_FILTER_OFFLOAD_MODE_3,
+
+       BCN_FILTER_OFFLOAD_MODE_DEFAULT = BCN_FILTER_OFFLOAD_MODE_1,
+};
+
 struct rtw_coex_info_req {
        u8 seq;
        u8 op_code;
@@ -237,6 +258,10 @@ struct rtw_fw_hdr_legacy {
 #define GET_RA_REPORT_BW(c2h_payload)          (c2h_payload[6])
 #define GET_RA_REPORT_MACID(c2h_payload)       (c2h_payload[1])
 
+#define GET_BCN_FILTER_NOTIFY_TYPE(c2h_payload)        (c2h_payload[1] & 0xf)
+#define GET_BCN_FILTER_NOTIFY_EVENT(c2h_payload)       (c2h_payload[1] & 0x10)
+#define GET_BCN_FILTER_NOTIFY_RSSI(c2h_payload)        (c2h_payload[2] - 100)
+
 /* PKT H2C */
 #define H2C_PKT_CMD_ID 0xFF
 #define H2C_PKT_CATEGORY 0x01
@@ -345,7 +370,10 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
 #define H2C_CMD_LPS_PG_INFO            0x2b
 #define H2C_CMD_RA_INFO                        0x40
 #define H2C_CMD_RSSI_MONITOR           0x42
+#define H2C_CMD_BCN_FILTER_OFFLOAD_P0  0x56
+#define H2C_CMD_BCN_FILTER_OFFLOAD_P1  0x57
 #define H2C_CMD_WL_PHY_INFO            0x58
+#define H2C_CMD_SCAN                   0x59
 
 #define H2C_CMD_COEX_TDMA_TYPE         0x60
 #define H2C_CMD_QUERY_BT_INFO          0x61
@@ -381,6 +409,23 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
        le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8))
 #define SET_WL_PHY_INFO_RX_EVM(h2c_pkt, value)                                \
        le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16))
+#define SET_BCN_FILTER_OFFLOAD_P1_MACID(h2c_pkt, value)                               \
+       le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, value)                      \
+       le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(16))
+#define SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, value)                        \
+       le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(20, 17))
+#define SET_BCN_FILTER_OFFLOAD_P1_OFFLOAD_MODE(h2c_pkt, value)                \
+       le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 21))
+#define SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, value)                   \
+       le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define SET_BCN_FILTER_OFFLOAD_P1_BCN_LOSS_CNT(h2c_pkt, value)                \
+       le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(3, 0))
+#define SET_BCN_FILTER_OFFLOAD_P1_BCN_INTERVAL(h2c_pkt, value)                \
+       le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(13, 4))
+
+#define SET_SCAN_START(h2c_pkt, value)                                        \
+       le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
 
 #define SET_PWR_MODE_SET_MODE(h2c_pkt, value)                                  \
        le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(14, 8))
@@ -554,6 +599,12 @@ static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb)
        return (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
 }
 
+static inline bool rtw_fw_feature_check(struct rtw_fw_state *fw,
+                                       enum rtw_fw_feature feature)
+{
+       return !!(fw->feature & feature);
+}
+
 void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
                               struct sk_buff *skb);
 void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb);
@@ -577,6 +628,8 @@ void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
 void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
 void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn);
 void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev);
+void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
+                                struct ieee80211_vif *vif);
 int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
                                u8 *buf, u32 size);
 void rtw_remove_rsvd_page(struct rtw_dev *rtwdev,
@@ -607,5 +660,5 @@ void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c);
 void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev);
 int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
                     u32 *buffer);
-
+void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start);
 #endif
index 333df6b..6f56298 100644 (file)
@@ -153,6 +153,9 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
        u8 port = 0;
        u8 bcn_ctrl = 0;
 
+       if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER))
+               vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+                                    IEEE80211_VIF_SUPPORTS_CQM_RSSI;
        rtwvif->port = port;
        rtwvif->stats.tx_unicast = 0;
        rtwvif->stats.rx_unicast = 0;
@@ -399,6 +402,8 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
                        rtw_write32_clr(rtwdev, REG_FWHW_TXQ_CTRL,
                                        BIT_EN_BCNQ_DL);
        }
+       if (changed & BSS_CHANGED_CQM)
+               rtw_fw_beacon_filter_config(rtwdev, true, vif);
 
        if (changed & BSS_CHANGED_MU_GROUPS)
                rtw_chip_set_gid_table(rtwdev, vif, conf);
@@ -450,6 +455,7 @@ static int rtw_ops_sta_remove(struct ieee80211_hw *hw,
 {
        struct rtw_dev *rtwdev = hw->priv;
 
+       rtw_fw_beacon_filter_config(rtwdev, false, vif);
        mutex_lock(&rtwdev->mutex);
        rtw_sta_remove(rtwdev, sta, true);
        mutex_unlock(&rtwdev->mutex);
@@ -599,6 +605,7 @@ static void rtw_ops_sw_scan_start(struct ieee80211_hw *hw,
        rtw_vif_port_config(rtwdev, rtwvif, config);
 
        rtw_coex_scan_notify(rtwdev, COEX_SCAN_START);
+       rtw_core_fw_scan_notify(rtwdev, true);
 
        set_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags);
        set_bit(RTW_FLAG_SCANNING, rtwdev->flags);
@@ -618,6 +625,8 @@ static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw,
        clear_bit(RTW_FLAG_SCANNING, rtwdev->flags);
        clear_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags);
 
+       rtw_core_fw_scan_notify(rtwdev, false);
+
        ether_addr_copy(rtwvif->mac_addr, vif->addr);
        config |= PORT_SET_MAC_ADDR;
        rtw_vif_port_config(rtwdev, rtwvif, config);
@@ -629,7 +638,7 @@ static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw,
 
 static void rtw_ops_mgd_prepare_tx(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif,
-                                  u16 duration)
+                                  struct ieee80211_prep_tx_info *info)
 {
        struct rtw_dev *rtwdev = hw->priv;
 
index f3a3a86..c636483 100644
@@ -2,6 +2,8 @@
 /* Copyright(c) 2018-2019  Realtek Corporation
  */
 
+#include <linux/devcoredump.h>
+
 #include "main.h"
 #include "regd.h"
 #include "fw.h"
@@ -239,7 +241,8 @@ static void rtw_watch_dog_work(struct work_struct *work)
          * get that vif and check whether the device has more traffic than
          * the threshold.
          */
-       if (rtwdev->ps_enabled && data.rtwvif && !ps_active)
+       if (rtwdev->ps_enabled && data.rtwvif && !ps_active &&
+           !rtwdev->beacon_loss)
                rtw_enter_lps(rtwdev, data.rtwvif->port);
 
        rtwdev->watch_dog_cnt++;
@@ -292,6 +295,7 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
        rtw_fw_media_status_report(rtwdev, si->mac_id, true);
 
        rtwdev->sta_cnt++;
+       rtwdev->beacon_loss = false;
        rtw_info(rtwdev, "sta %pM joined with macid %d\n",
                 sta->addr, si->mac_id);
 
@@ -318,59 +322,131 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
                 sta->addr, si->mac_id);
 }
 
-static bool rtw_fw_dump_crash_log(struct rtw_dev *rtwdev)
+struct rtw_fwcd_hdr {
+       u32 item;
+       u32 size;
+       u32 padding1;
+       u32 padding2;
+} __packed;
+
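+/* The fwcd (firmware coredump) buffer prepared below is a flat sequence
+ * of records, each a struct rtw_fwcd_hdr immediately followed by
+ * hdr->size bytes of dump data; rtw_fwcd_next() appends one record per
+ * call.
+ */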
+static int rtw_fwcd_prep(struct rtw_dev *rtwdev)
+{
+       struct rtw_chip_info *chip = rtwdev->chip;
+       struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc;
+       const struct rtw_fwcd_segs *segs = chip->fwcd_segs;
+       u32 prep_size = chip->fw_rxff_size + sizeof(struct rtw_fwcd_hdr);
+       u8 i;
+
+       if (segs) {
+               prep_size += segs->num * sizeof(struct rtw_fwcd_hdr);
+
+               for (i = 0; i < segs->num; i++)
+                       prep_size += segs->segs[i];
+       }
+
+       desc->data = vmalloc(prep_size);
+       if (!desc->data)
+               return -ENOMEM;
+
+       desc->size = prep_size;
+       desc->next = desc->data;
+
+       return 0;
+}
+
+static u8 *rtw_fwcd_next(struct rtw_dev *rtwdev, u32 item, u32 size)
+{
+       struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc;
+       struct rtw_fwcd_hdr *hdr;
+       u8 *next;
+
+       if (!desc->data) {
+               rtw_dbg(rtwdev, RTW_DBG_FW, "fwcd buffer isn't prepared\n");
+               return NULL;
+       }
+
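+       /* reserve room for the record header, then bounds-check the payload */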
+       next = desc->next + sizeof(struct rtw_fwcd_hdr);
+       if (next - desc->data + size > desc->size) {
+               rtw_dbg(rtwdev, RTW_DBG_FW, "fwcd buffer is too small\n");
+               return NULL;
+       }
+
+       hdr = (struct rtw_fwcd_hdr *)(desc->next);
+       hdr->item = item;
+       hdr->size = size;
+       hdr->padding1 = 0x01234567;
+       hdr->padding2 = 0x89abcdef;
+       desc->next = next + size;
+
+       return next;
+}
+
+static void rtw_fwcd_dump(struct rtw_dev *rtwdev)
+{
+       struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc;
+
+       rtw_dbg(rtwdev, RTW_DBG_FW, "dump fwcd\n");
+
+       /* The data is freed once the device coredump's lifetime expires.
+        * After dev_coredumpv() is called, the buffer is owned by the device
+        * coredump framework. Note that a new dump will be discarded if a
+        * previous one hasn't been released yet.
+        */
+       dev_coredumpv(rtwdev->dev, desc->data, desc->size, GFP_KERNEL);
+}
+
+static void rtw_fwcd_free(struct rtw_dev *rtwdev, bool free_self)
+{
+       struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc;
+
+       if (free_self) {
+               rtw_dbg(rtwdev, RTW_DBG_FW, "freeing fwcd buffer ourselves\n");
+               vfree(desc->data);
+       }
+
+       desc->data = NULL;
+       desc->next = NULL;
+}
+
+static int rtw_fw_dump_crash_log(struct rtw_dev *rtwdev)
 {
        u32 size = rtwdev->chip->fw_rxff_size;
        u32 *buf;
        u8 seq;
-       bool ret = true;
 
-       buf = vmalloc(size);
+       buf = (u32 *)rtw_fwcd_next(rtwdev, RTW_FWCD_TLV, size);
        if (!buf)
-               goto exit;
+               return -ENOMEM;
 
        if (rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW, 0, size, buf)) {
                rtw_dbg(rtwdev, RTW_DBG_FW, "dump fw fifo fail\n");
-               goto free_buf;
+               return -EINVAL;
        }
 
        if (GET_FW_DUMP_LEN(buf) == 0) {
                rtw_dbg(rtwdev, RTW_DBG_FW, "fw crash dump's length is 0\n");
-               goto free_buf;
+               return -EINVAL;
        }
 
        seq = GET_FW_DUMP_SEQ(buf);
-       if (seq > 0 && seq != (rtwdev->fw.prev_dump_seq + 1)) {
+       if (seq > 0) {
                rtw_dbg(rtwdev, RTW_DBG_FW,
                        "fw crash dump's seq is wrong: %d\n", seq);
-               goto free_buf;
-       }
-
-       print_hex_dump(KERN_ERR, "rtw88 fw dump: ", DUMP_PREFIX_OFFSET, 16, 1,
-                      buf, size, true);
-
-       if (GET_FW_DUMP_MORE(buf) == 1) {
-               rtwdev->fw.prev_dump_seq = seq;
-               ret = false;
+               return -EINVAL;
        }
 
-free_buf:
-       vfree(buf);
-exit:
-       rtw_write8(rtwdev, REG_MCU_TST_CFG, 0);
-
-       return ret;
+       return 0;
 }
 
 int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size,
-               const char *prefix_str)
+               u32 fwcd_item)
 {
        u32 rxff = rtwdev->chip->fw_rxff_size;
        u32 dump_size, done_size = 0;
        u8 *buf;
        int ret;
 
-       buf = vzalloc(size);
+       buf = rtw_fwcd_next(rtwdev, fwcd_item, size);
        if (!buf)
                return -ENOMEM;
 
@@ -383,7 +459,7 @@ int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size,
                        rtw_err(rtwdev,
                                "ddma fw 0x%x [+0x%x] to fw fifo fail\n",
                                ocp_src, done_size);
-                       goto exit;
+                       return ret;
                }
 
                ret = rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW, 0,
@@ -392,24 +468,18 @@ int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size,
                        rtw_err(rtwdev,
                                "dump fw 0x%x [+0x%x] from fw fifo fail\n",
                                ocp_src, done_size);
-                       goto exit;
+                       return ret;
                }
 
                size -= dump_size;
                done_size += dump_size;
        }
 
-       print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 1,
-                      buf, done_size, true);
-
-exit:
-       vfree(buf);
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL(rtw_dump_fw);
 
-int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size,
-                const char *prefix_str)
+int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size)
 {
        u8 *buf;
        u32 i;
@@ -419,17 +489,13 @@ int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size,
                return -EINVAL;
        }
 
-       buf = vzalloc(size);
+       buf = rtw_fwcd_next(rtwdev, RTW_FWCD_REG, size);
        if (!buf)
                return -ENOMEM;
 
        for (i = 0; i < size; i += 4)
                *(u32 *)(buf + i) = rtw_read32(rtwdev, addr + i);
 
-       print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf,
-                      size, true);
-
-       vfree(buf);
        return 0;
 }
 EXPORT_SYMBOL(rtw_dump_reg);
@@ -487,20 +553,24 @@ void rtw_fw_recovery(struct rtw_dev *rtwdev)
 
 static void __fw_recovery_work(struct rtw_dev *rtwdev)
 {
-
-       /* rtw_fw_dump_crash_log() returns false indicates that there are
-        * still more log to dump. Driver set 0x1cf[7:0] = 0x1 to tell firmware
-        * to dump the remaining part of the log, and firmware will trigger an
-        * IMR_C2HCMD interrupt to inform driver the log is ready.
-        */
-       if (!rtw_fw_dump_crash_log(rtwdev)) {
-               rtw_write8(rtwdev, REG_HRCV_MSG, 1);
-               return;
-       }
-       rtwdev->fw.prev_dump_seq = 0;
+       int ret = 0;
 
        set_bit(RTW_FLAG_RESTARTING, rtwdev->flags);
-       rtw_chip_dump_fw_crash(rtwdev);
+
+       ret = rtw_fwcd_prep(rtwdev);
+       if (ret)
+               goto free;
+       ret = rtw_fw_dump_crash_log(rtwdev);
+       if (ret)
+               goto free;
+       ret = rtw_chip_dump_fw_crash(rtwdev);
+       if (ret)
+               goto free;
+
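+       /* On success, the device coredump framework takes ownership of the
+        * buffer via rtw_fwcd_dump(); on any error above, free it ourselves
+        * by calling rtw_fwcd_free() with free_self set.
+        */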
+       rtw_fwcd_dump(rtwdev);
+free:
+       rtw_fwcd_free(rtwdev, !!ret);
+       rtw_write8(rtwdev, REG_MCU_TST_CFG, 0);
 
        WARN(1, "firmware crash, starting reset and recovery\n");
 
@@ -1109,11 +1179,11 @@ static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev,
                return LPS_DEEP_MODE_NONE;
 
        if ((chip->lps_deep_mode_supported & BIT(LPS_DEEP_MODE_PG)) &&
-           (fw->feature & FW_FEATURE_PG))
+           rtw_fw_feature_check(fw, FW_FEATURE_PG))
                return LPS_DEEP_MODE_PG;
 
        if ((chip->lps_deep_mode_supported & BIT(LPS_DEEP_MODE_LCLK)) &&
-           (fw->feature & FW_FEATURE_LCLK))
+           rtw_fw_feature_check(fw, FW_FEATURE_LCLK))
                return LPS_DEEP_MODE_LCLK;
 
        return LPS_DEEP_MODE_NONE;
@@ -1183,6 +1253,22 @@ err:
        return ret;
 }
 
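+/* On scan stop, the firmware reports a channel-density figure through a
+ * C2H message whose handler (not part of this hunk) completes
+ * fw_scan_density; wait for it so the report lands before the scan state
+ * is torn down.
+ */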
+void rtw_core_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
+{
+       if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_NOTIFY_SCAN))
+               return;
+
+       if (start) {
+               rtw_fw_scan_notify(rtwdev, true);
+       } else {
+               reinit_completion(&rtwdev->fw_scan_density);
+               rtw_fw_scan_notify(rtwdev, false);
+               if (!wait_for_completion_timeout(&rtwdev->fw_scan_density,
+                                                SCAN_NOTIFY_TIMEOUT))
+                       rtw_warn(rtwdev, "firmware failed to report density after scan\n");
+       }
+}
+
 int rtw_core_start(struct rtw_dev *rtwdev)
 {
        int ret;
@@ -1761,6 +1847,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
 
        init_waitqueue_head(&rtwdev->coex.wait);
        init_completion(&rtwdev->lps_leave_check);
+       init_completion(&rtwdev->fw_scan_density);
 
        rtwdev->sec.total_cam_num = 32;
        rtwdev->hal.current_channel = 1;
@@ -1812,6 +1899,7 @@ void rtw_core_deinit(struct rtw_dev *rtwdev)
        destroy_workqueue(rtwdev->tx_wq);
        spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags);
        skb_queue_purge(&rtwdev->tx_report.queue);
+       skb_queue_purge(&rtwdev->coex.queue);
        spin_unlock_irqrestore(&rtwdev->tx_report.q_lock, flags);
 
        list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list,
index dc37448..e5af375 100644
@@ -806,7 +806,7 @@ struct rtw_regulatory {
 
 struct rtw_chip_ops {
        int (*mac_init)(struct rtw_dev *rtwdev);
-       void (*dump_fw_crash)(struct rtw_dev *rtwdev);
+       int (*dump_fw_crash)(struct rtw_dev *rtwdev);
        void (*shutdown)(struct rtw_dev *rtwdev);
        int (*read_efuse)(struct rtw_dev *rtwdev, u8 *map);
        void (*phy_set_param)(struct rtw_dev *rtwdev);
@@ -841,6 +841,10 @@ struct rtw_chip_ops {
                             u8 fixrate_en, u8 *new_rate);
        void (*cfo_init)(struct rtw_dev *rtwdev);
        void (*cfo_track)(struct rtw_dev *rtwdev);
+       void (*config_tx_path)(struct rtw_dev *rtwdev, u8 tx_path,
+                              enum rtw_bb_path tx_path_1ss,
+                              enum rtw_bb_path tx_path_cck,
+                              bool is_tx2_path);
 
        /* for coex */
        void (*coex_set_init)(struct rtw_dev *rtwdev);
@@ -1108,6 +1112,15 @@ enum rtw_fw_fifo_sel {
        RTW_FW_FIFO_MAX,
 };
 
+enum rtw_fwcd_item {
+       RTW_FWCD_TLV,
+       RTW_FWCD_REG,
+       RTW_FWCD_ROM,
+       RTW_FWCD_IMEM,
+       RTW_FWCD_DMEM,
+       RTW_FWCD_EMEM,
+};
+
 /* hardware configuration for each IC */
 struct rtw_chip_info {
        struct rtw_chip_ops *ops;
@@ -1136,7 +1149,11 @@ struct rtw_chip_info {
        u8 max_power_index;
 
        u16 fw_fifo_addr[RTW_FW_FIFO_MAX];
+       const struct rtw_fwcd_segs *fwcd_segs;
+
+       u8 default_1ss_tx_path;
 
+       bool path_div_supported;
        bool ht_supported;
        bool vht_supported;
        u8 lps_deep_mode_supported;
@@ -1614,6 +1631,8 @@ struct rtw_dm_info {
        struct rtw_iqk_info iqk;
        struct rtw_gapk_info gapk;
        bool is_bt_iqk_timeout;
+
+       u8 scan_density;
 };
 
 struct rtw_efuse {
@@ -1717,6 +1736,17 @@ struct rtw_fifo_conf {
        const struct rtw_rqpn *rqpn;
 };
 
+struct rtw_fwcd_desc {
+       u32 size;
+       u8 *next;
+       u8 *data;
+};
+
+struct rtw_fwcd_segs {
+       const u32 *segs;
+       u8 num;
+};
+
 #define FW_CD_TYPE 0xffff
 #define FW_CD_LEN 4
 #define FW_CD_VAL 0xaabbccdd
@@ -1724,11 +1754,11 @@ struct rtw_fw_state {
        const struct firmware *firmware;
        struct rtw_dev *rtwdev;
        struct completion completion;
+       struct rtw_fwcd_desc fwcd_desc;
        u16 version;
        u8 sub_version;
        u8 sub_index;
        u16 h2c_version;
-       u8 prev_dump_seq;
        u32 feature;
 };
 
@@ -1781,6 +1811,14 @@ struct rtw_hal {
                     [DESC_RATE_MAX];
 };
 
+struct rtw_path_div {
+       enum rtw_bb_path current_tx_path;
+       u32 path_a_sum;
+       u32 path_b_sum;
+       u16 path_a_cnt;
+       u16 path_b_cnt;
+};
+
 struct rtw_dev {
        struct ieee80211_hw *hw;
        struct device *dev;
@@ -1837,6 +1875,7 @@ struct rtw_dev {
        /* lps power state & handler work */
        struct rtw_lps_conf lps_conf;
        bool ps_enabled;
+       bool beacon_loss;
        struct completion lps_leave_check;
 
        struct dentry *debugfs;
@@ -1848,11 +1887,13 @@ struct rtw_dev {
        DECLARE_BITMAP(flags, NUM_OF_RTW_FLAGS);
 
        u8 mp_mode;
+       struct rtw_path_div dm_path_div;
 
        struct rtw_fw_state wow_fw;
        struct rtw_wow_param wow;
 
        bool need_rfk;
+       struct completion fw_scan_density;
 
        /* hci related data, must be last */
        u8 priv[] __aligned(sizeof(void *));
@@ -1923,10 +1964,12 @@ static inline void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id)
        clear_bit(mac_id, rtwdev->mac_id_map);
 }
 
-static inline void rtw_chip_dump_fw_crash(struct rtw_dev *rtwdev)
+static inline int rtw_chip_dump_fw_crash(struct rtw_dev *rtwdev)
 {
        if (rtwdev->chip->ops->dump_fw_crash)
-               rtwdev->chip->ops->dump_fw_crash(rtwdev);
+               return rtwdev->chip->ops->dump_fw_crash(rtwdev);
+
+       return 0;
 }
 
 void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
@@ -1958,9 +2001,9 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
 void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
                    bool fw_exist);
 void rtw_fw_recovery(struct rtw_dev *rtwdev);
+void rtw_core_fw_scan_notify(struct rtw_dev *rtwdev, bool start);
 int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size,
-               const char *prefix_str);
-int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size,
-                const char *prefix_str);
+               u32 fwcd_item);
+int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size);
 
 #endif
index f59a4c4..e7d17ab 100644
@@ -2,6 +2,7 @@
 /* Copyright(c) 2018-2019  Realtek Corporation
  */
 
+#include <linux/dmi.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include "main.h"
@@ -1673,6 +1674,36 @@ static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
        netif_napi_del(&rtwpci->napi);
 }
 
+enum rtw88_quirk_dis_pci_caps {
+       QUIRK_DIS_PCI_CAP_MSI,
+       QUIRK_DIS_PCI_CAP_ASPM,
+};
+
+static int disable_pci_caps(const struct dmi_system_id *dmi)
+{
+       uintptr_t dis_caps = (uintptr_t)dmi->driver_data;
+
+       if (dis_caps & BIT(QUIRK_DIS_PCI_CAP_MSI))
+               rtw_disable_msi = true;
+       if (dis_caps & BIT(QUIRK_DIS_PCI_CAP_ASPM))
+               rtw_pci_disable_aspm = true;
+
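+       /* a non-zero return stops dmi_check_system() after this match */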
+       return 1;
+}
+
+static const struct dmi_system_id rtw88_pci_quirks[] = {
+       {
+               .callback = disable_pci_caps,
+               .ident = "Protempo Ltd L116HTN6SPW",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Protempo Ltd"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "L116HTN6SPW"),
+               },
+               .driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM),
+       },
+       {}
+};
+
 int rtw_pci_probe(struct pci_dev *pdev,
                  const struct pci_device_id *id)
 {
@@ -1723,6 +1754,7 @@ int rtw_pci_probe(struct pci_dev *pdev,
                goto err_destroy_pci;
        }
 
+       dmi_check_system(rtw88_pci_quirks);
        rtw_pci_phy_cfg(rtwdev);
 
        ret = rtw_register_hw(rtwdev, hw);
index 8146aca..569dd3c 100644
@@ -127,6 +127,17 @@ static void rtw_phy_cfo_init(struct rtw_dev *rtwdev)
                chip->ops->cfo_init(rtwdev);
 }
 
+static void rtw_phy_tx_path_div_init(struct rtw_dev *rtwdev)
+{
+       struct rtw_path_div *path_div = &rtwdev->dm_path_div;
+
+       path_div->current_tx_path = rtwdev->chip->default_1ss_tx_path;
+       path_div->path_a_cnt = 0;
+       path_div->path_a_sum = 0;
+       path_div->path_b_cnt = 0;
+       path_div->path_b_sum = 0;
+}
+
 void rtw_phy_init(struct rtw_dev *rtwdev)
 {
        struct rtw_chip_info *chip = rtwdev->chip;
@@ -149,6 +160,7 @@ void rtw_phy_init(struct rtw_dev *rtwdev)
 
        dm_info->iqk.done = false;
        rtw_phy_cfo_init(rtwdev);
+       rtw_phy_tx_path_div_init(rtwdev);
 }
 EXPORT_SYMBOL(rtw_phy_init);
 
@@ -695,6 +707,7 @@ void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
        rtw_phy_dig(rtwdev);
        rtw_phy_cck_pd(rtwdev);
        rtw_phy_ra_track(rtwdev);
+       rtw_phy_tx_path_diversity(rtwdev);
        rtw_phy_cfo_track(rtwdev);
        rtw_phy_dpk_track(rtwdev);
        rtw_phy_pwr_track(rtwdev);
@@ -2315,3 +2328,71 @@ bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev)
        return false;
 }
 EXPORT_SYMBOL(rtw_phy_pwrtrack_need_iqk);
+
+static void rtw_phy_set_tx_path_by_reg(struct rtw_dev *rtwdev,
+                                      enum rtw_bb_path tx_path_sel_1ss)
+{
+       struct rtw_path_div *path_div = &rtwdev->dm_path_div;
+       enum rtw_bb_path tx_path_sel_cck = tx_path_sel_1ss;
+       struct rtw_chip_info *chip = rtwdev->chip;
+
+       if (tx_path_sel_1ss == path_div->current_tx_path)
+               return;
+
+       path_div->current_tx_path = tx_path_sel_1ss;
+       rtw_dbg(rtwdev, RTW_DBG_PATH_DIV, "Switch TX path=%s\n",
+               tx_path_sel_1ss == BB_PATH_A ? "A" : "B");
+       chip->ops->config_tx_path(rtwdev, rtwdev->hal.antenna_tx,
+                                 tx_path_sel_1ss, tx_path_sel_cck, false);
+}
+
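+/* Pick the TX path with the higher average RSSI. The per-path sums and
+ * counts are accumulated per received packet in query_phy_status_page1()
+ * and reset below after every decision.
+ */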
+static void rtw_phy_tx_path_div_select(struct rtw_dev *rtwdev)
+{
+       struct rtw_path_div *path_div = &rtwdev->dm_path_div;
+       enum rtw_bb_path path = path_div->current_tx_path;
+       s32 rssi_a = 0, rssi_b = 0;
+
+       if (path_div->path_a_cnt)
+               rssi_a = path_div->path_a_sum / path_div->path_a_cnt;
+       else
+               rssi_a = 0;
+       if (path_div->path_b_cnt)
+               rssi_b = path_div->path_b_sum / path_div->path_b_cnt;
+       else
+               rssi_b = 0;
+
+       if (rssi_a != rssi_b)
+               path = (rssi_a > rssi_b) ? BB_PATH_A : BB_PATH_B;
+
+       path_div->path_a_cnt = 0;
+       path_div->path_a_sum = 0;
+       path_div->path_b_cnt = 0;
+       path_div->path_b_sum = 0;
+       rtw_phy_set_tx_path_by_reg(rtwdev, path);
+}
+
+static void rtw_phy_tx_path_diversity_2ss(struct rtw_dev *rtwdev)
+{
+       if (rtwdev->hal.antenna_rx != BB_PATH_AB) {
+               rtw_dbg(rtwdev, RTW_DBG_PATH_DIV,
+                       "[Return] tx_Path_en=%d, rx_Path_en=%d\n",
+                       rtwdev->hal.antenna_tx, rtwdev->hal.antenna_rx);
+               return;
+       }
+       if (rtwdev->sta_cnt == 0) {
+               rtw_dbg(rtwdev, RTW_DBG_PATH_DIV, "No Link\n");
+               return;
+       }
+
+       rtw_phy_tx_path_div_select(rtwdev);
+}
+
+void rtw_phy_tx_path_diversity(struct rtw_dev *rtwdev)
+{
+       struct rtw_chip_info *chip = rtwdev->chip;
+
+       if (!chip->path_div_supported)
+               return;
+
+       rtw_phy_tx_path_diversity_2ss(rtwdev);
+}
index 0b6f2fc..112ed12 100644
@@ -61,6 +61,7 @@ void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
                                struct rtw_swing_table *swing_table);
 void rtw_phy_parsing_cfo(struct rtw_dev *rtwdev,
                         struct rtw_rx_pkt_stat *pkt_stat);
+void rtw_phy_tx_path_diversity(struct rtw_dev *rtwdev);
 
 struct rtw_txpwr_lmt_cfg_pair {
        u8 regd;
index 3bead34..3f0ac33 100644
@@ -152,7 +152,7 @@ static void rtw_fw_leave_lps_check(struct rtw_dev *rtwdev)
        else
                fw = &rtwdev->fw;
 
-       if (fw->feature & FW_FEATURE_LPS_C2H)
+       if (rtw_fw_feature_check(fw, FW_FEATURE_LPS_C2H))
                ret = __rtw_fw_leave_lps_check_c2h(rtwdev);
        else
                ret = __rtw_fw_leave_lps_check_reg(rtwdev);
@@ -172,7 +172,7 @@ static void rtw_fw_leave_lps_check_prepare(struct rtw_dev *rtwdev)
        else
                fw = &rtwdev->fw;
 
-       if (fw->feature & FW_FEATURE_LPS_C2H)
+       if (rtw_fw_feature_check(fw, FW_FEATURE_LPS_C2H))
                reinit_completion(&rtwdev->lps_leave_check);
 }
 
index 6cb593c..8bf3cd3 100644
@@ -17,7 +17,6 @@
 #include "util.h"
 #include "bf.h"
 #include "efuse.h"
-#include "coex.h"
 
 #define IQK_DONE_8822C 0xaa
 
@@ -80,6 +79,13 @@ static void rtw8822c_header_file_init(struct rtw_dev *rtwdev, bool pre)
                rtw_write32_set(rtwdev, REG_ENCCK, BIT_CCK_OFDM_BLK_EN);
 }
 
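+/* Pulse BIT_FEN_BB_RSTB (set -> clear -> set) to reset the baseband; the
+ * _RSTB suffix suggests an active-low reset line, so clearing the bit
+ * asserts the reset and setting it releases it.
+ */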
+static void rtw8822c_bb_reset(struct rtw_dev *rtwdev)
+{
+       rtw_write16_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_BB_RSTB);
+       rtw_write16_clr(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_BB_RSTB);
+       rtw_write16_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_BB_RSTB);
+}
+
 static void rtw8822c_dac_backup_reg(struct rtw_dev *rtwdev,
                                    struct rtw_backup_info *backup,
                                    struct rtw_backup_info *backup_rf)
@@ -2103,13 +2109,51 @@ static int rtw8822c_mac_init(struct rtw_dev *rtwdev)
        return 0;
 }
 
-static void rtw8822c_dump_fw_crash(struct rtw_dev *rtwdev)
+#define FWCD_SIZE_REG_8822C 0x2000
+#define FWCD_SIZE_DMEM_8822C 0x10000
+#define FWCD_SIZE_IMEM_8822C 0x10000
+#define FWCD_SIZE_EMEM_8822C 0x20000
+#define FWCD_SIZE_ROM_8822C 0x10000
+
+static const u32 __fwcd_segs_8822c[] = {
+       FWCD_SIZE_REG_8822C,
+       FWCD_SIZE_DMEM_8822C,
+       FWCD_SIZE_IMEM_8822C,
+       FWCD_SIZE_EMEM_8822C,
+       FWCD_SIZE_ROM_8822C,
+};
+
+static const struct rtw_fwcd_segs rtw8822c_fwcd_segs = {
+       .segs = __fwcd_segs_8822c,
+       .num = ARRAY_SIZE(__fwcd_segs_8822c),
+};
+
+static int rtw8822c_dump_fw_crash(struct rtw_dev *rtwdev)
 {
-       rtw_dump_reg(rtwdev, 0x0, 0x2000, "rtw8822c reg_");
-       rtw_dump_fw(rtwdev, OCPBASE_DMEM_88XX, 0x10000, "rtw8822c DMEM_");
-       rtw_dump_fw(rtwdev, OCPBASE_IMEM_88XX, 0x10000, "rtw8822c IMEM_");
-       rtw_dump_fw(rtwdev, OCPBASE_EMEM_88XX, 0x20000, "rtw8822c EMEM_");
-       rtw_dump_fw(rtwdev, OCPBASE_ROM_88XX, 0x10000, "rtw8822c ROM_");
+#define __dump_fw_8822c(_dev, _mem) \
+       rtw_dump_fw(_dev, OCPBASE_ ## _mem ## _88XX, \
+                   FWCD_SIZE_ ## _mem ## _8822C, RTW_FWCD_ ## _mem)
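+       /* e.g. __dump_fw_8822c(rtwdev, DMEM) expands to
+        * rtw_dump_fw(rtwdev, OCPBASE_DMEM_88XX, FWCD_SIZE_DMEM_8822C,
+        *             RTW_FWCD_DMEM)
+        */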
+       int ret;
+
+       ret = rtw_dump_reg(rtwdev, 0x0, FWCD_SIZE_REG_8822C);
+       if (ret)
+               return ret;
+       ret = __dump_fw_8822c(rtwdev, DMEM);
+       if (ret)
+               return ret;
+       ret = __dump_fw_8822c(rtwdev, IMEM);
+       if (ret)
+               return ret;
+       ret = __dump_fw_8822c(rtwdev, EMEM);
+       if (ret)
+               return ret;
+       ret = __dump_fw_8822c(rtwdev, ROM);
+       if (ret)
+               return ret;
+
+       return 0;
+
+#undef __dump_fw_8822c
 }
 
 static void rtw8822c_rstb_3wire(struct rtw_dev *rtwdev, bool enable)
@@ -2424,10 +2468,11 @@ static void rtw8822c_config_cck_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
                else
                        rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x8);
        }
+       rtw8822c_bb_reset(rtwdev);
 }
 
 static void rtw8822c_config_ofdm_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
-                                        bool is_tx2_path)
+                                        enum rtw_bb_path tx_path_sel_1ss)
 {
        if (tx_path == BB_PATH_A) {
                rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x11);
@@ -2436,21 +2481,28 @@ static void rtw8822c_config_ofdm_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
                rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x12);
                rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xff, 0x0);
        } else {
-               if (is_tx2_path) {
+               if (tx_path_sel_1ss == BB_PATH_AB) {
                        rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x33);
                        rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xffff, 0x0404);
-               } else {
+               } else if (tx_path_sel_1ss == BB_PATH_B) {
+                       rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x32);
+                       rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xffff, 0x0400);
+               } else if (tx_path_sel_1ss == BB_PATH_A) {
                        rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x31);
                        rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xffff, 0x0400);
                }
        }
+       rtw8822c_bb_reset(rtwdev);
 }
 
 static void rtw8822c_config_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
+                                   enum rtw_bb_path tx_path_sel_1ss,
+                                   enum rtw_bb_path tx_path_cck,
                                    bool is_tx2_path)
 {
-       rtw8822c_config_cck_tx_path(rtwdev, tx_path, is_tx2_path);
-       rtw8822c_config_ofdm_tx_path(rtwdev, tx_path, is_tx2_path);
+       rtw8822c_config_cck_tx_path(rtwdev, tx_path_cck, is_tx2_path);
+       rtw8822c_config_ofdm_tx_path(rtwdev, tx_path, tx_path_sel_1ss);
+       rtw8822c_bb_reset(rtwdev);
 }
 
 static void rtw8822c_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
@@ -2466,7 +2518,8 @@ static void rtw8822c_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
                rtw_write32_mask(rtwdev, REG_ORITXCODE2, MASK20BITS, 0x11111);
 
        rtw8822c_config_rx_path(rtwdev, rx_path);
-       rtw8822c_config_tx_path(rtwdev, tx_path, is_tx2_path);
+       rtw8822c_config_tx_path(rtwdev, tx_path, BB_PATH_A, BB_PATH_A,
+                               is_tx2_path);
 
        rtw8822c_toggle_igi(rtwdev);
 }
@@ -2517,6 +2570,7 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
 static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
                                   struct rtw_rx_pkt_stat *pkt_stat)
 {
+       struct rtw_path_div *p_div = &rtwdev->dm_path_div;
        struct rtw_dm_info *dm_info = &rtwdev->dm_info;
        u8 rxsc, bw;
        s8 min_rx_power = -120;
@@ -2559,6 +2613,13 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
        for (path = 0; path <= rtwdev->hal.rf_path_num; path++) {
                rssi = rtw_phy_rf_power_2_rssi(&pkt_stat->rx_power[path], 1);
                dm_info->rssi[path] = rssi;
+               if (path == RF_PATH_A) {
+                       p_div->path_a_sum += rssi;
+                       p_div->path_a_cnt++;
+               } else if (path == RF_PATH_B) {
+                       p_div->path_b_sum += rssi;
+                       p_div->path_b_cnt++;
+               }
                dm_info->rx_snr[path] = pkt_stat->rx_snr[path] >> 1;
                dm_info->cfo_tail[path] = (pkt_stat->cfo_tail[path] * 5) >> 1;
 
@@ -4371,26 +4432,28 @@ static void rtw8822c_pwrtrack_set(struct rtw_dev *rtwdev, u8 rf_path)
        }
 }
 
-static void rtw8822c_pwr_track_path(struct rtw_dev *rtwdev,
-                                   struct rtw_swing_table *swing_table,
-                                   u8 path)
+static void rtw8822c_pwr_track_stats(struct rtw_dev *rtwdev, u8 path)
 {
-       struct rtw_dm_info *dm_info = &rtwdev->dm_info;
-       u8 thermal_value, delta;
+       u8 thermal_value;
 
        if (rtwdev->efuse.thermal_meter[path] == 0xff)
                return;
 
        thermal_value = rtw_read_rf(rtwdev, path, RF_T_METER, 0x7e);
-
        rtw_phy_pwrtrack_avg(rtwdev, thermal_value, path);
+}
 
-       delta = rtw_phy_pwrtrack_get_delta(rtwdev, path);
+static void rtw8822c_pwr_track_path(struct rtw_dev *rtwdev,
+                                   struct rtw_swing_table *swing_table,
+                                   u8 path)
+{
+       struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+       u8 delta;
 
+       delta = rtw_phy_pwrtrack_get_delta(rtwdev, path);
        dm_info->delta_power_index[path] =
                rtw_phy_pwrtrack_get_pwridx(rtwdev, swing_table, path, path,
                                            delta);
-
        rtw8822c_pwrtrack_set(rtwdev, path);
 }
 
@@ -4401,12 +4464,12 @@ static void __rtw8822c_pwr_track(struct rtw_dev *rtwdev)
 
        rtw_phy_config_swing_table(rtwdev, &swing_table);
 
+       for (i = 0; i < rtwdev->hal.rf_path_num; i++)
+               rtw8822c_pwr_track_stats(rtwdev, i);
        if (rtw_phy_pwrtrack_need_lck(rtwdev))
                rtw8822c_do_lck(rtwdev);
-
        for (i = 0; i < rtwdev->hal.rf_path_num; i++)
                rtw8822c_pwr_track_path(rtwdev, &swing_table, i);
-
 }
 
 static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
@@ -4851,6 +4914,7 @@ static struct rtw_chip_ops rtw8822c_ops = {
        .cfg_csi_rate           = rtw_bf_cfg_csi_rate,
        .cfo_init               = rtw8822c_cfo_init,
        .cfo_track              = rtw8822c_cfo_track,
+       .config_tx_path         = rtw8822c_config_tx_path,
 
        .coex_set_init          = rtw8822c_coex_cfg_init,
        .coex_set_ant_switch    = NULL,
@@ -5192,6 +5256,8 @@ struct rtw_chip_info rtw8822c_hw_spec = {
        .band = RTW_BAND_2G | RTW_BAND_5G,
        .page_size = 128,
        .dig_min = 0x20,
+       .default_1ss_tx_path = BB_PATH_A,
+       .path_div_supported = true,
        .ht_supported = true,
        .vht_supported = true,
        .lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK) | BIT(LPS_DEEP_MODE_PG),
@@ -5259,6 +5325,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
        .coex_info_hw_regs = coex_info_hw_regs_8822c,
 
        .fw_fifo_addr = {0x780, 0x700, 0x780, 0x660, 0x650, 0x680},
+       .fwcd_segs = &rtw8822c_fwcd_segs,
 };
 EXPORT_SYMBOL(rtw8822c_hw_spec);
 
index 822f3da..f9e3d07 100644
@@ -16812,53 +16812,53 @@ static const u32 rtw8822c_rf_a[] = {
        0x92000002,     0x00000000,     0x40000000,     0x00000000,
                0x03F, 0x00010E46,
        0x93000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x93000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x93000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x93000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x93000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x93000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x93000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x93000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x94000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x94000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x94000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x94000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x94000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x94000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x94000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x94000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x95000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x95000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x95000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x95000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x95000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x95000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x95000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0x95000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00030246,
+               0x03F, 0x0003D646,
        0xA0000000,     0x00000000,
                0x03F, 0x00002A46,
        0xB0000000,     0x00000000,
@@ -18762,53 +18762,53 @@ static const u32 rtw8822c_rf_a[] = {
        0x92000002,     0x00000000,     0x40000000,     0x00000000,
                0x03F, 0x0000EA46,
        0x93000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0xA0000000,     0x00000000,
                0x03F, 0x00002A46,
        0xB0000000,     0x00000000,
@@ -18957,53 +18957,53 @@ static const u32 rtw8822c_rf_a[] = {
        0x92000002,     0x00000000,     0x40000000,     0x00000000,
                0x03F, 0x0000EA46,
        0x93000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0xA0000000,     0x00000000,
                0x03F, 0x00002A46,
        0xB0000000,     0x00000000,
@@ -19152,53 +19152,53 @@ static const u32 rtw8822c_rf_a[] = {
        0x92000002,     0x00000000,     0x40000000,     0x00000000,
                0x03F, 0x0000EA46,
        0x93000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0xA0000000,     0x00000000,
                0x03F, 0x00002A46,
        0xB0000000,     0x00000000,
@@ -19347,53 +19347,53 @@ static const u32 rtw8822c_rf_a[] = {
        0x92000002,     0x00000000,     0x40000000,     0x00000000,
                0x03F, 0x0000EA46,
        0x93000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x93000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x94000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000001,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000002,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000003,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000004,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000005,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000006,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000015,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0x95000016,     0x00000000,     0x40000000,     0x00000000,
-               0x03F, 0x00031E46,
+               0x03F, 0x0003D646,
        0xA0000000,     0x00000000,
                0x03F, 0x00002A46,
        0xB0000000,     0x00000000,
@@ -19610,21 +19610,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000002,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19633,21 +19633,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000003,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19656,21 +19656,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000004,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19679,21 +19679,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000005,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19702,21 +19702,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000006,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19725,21 +19725,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000015,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19748,21 +19748,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000016,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19771,21 +19771,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000001,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19794,21 +19794,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000002,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19817,21 +19817,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000003,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19840,21 +19840,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000004,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19863,21 +19863,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000005,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19886,21 +19886,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000006,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19909,21 +19909,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000015,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19932,21 +19932,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000016,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19955,21 +19955,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000001,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -19978,21 +19978,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000002,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -20001,21 +20001,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000003,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -20024,21 +20024,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000004,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -20047,21 +20047,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000005,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -20070,21 +20070,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000006,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -20093,21 +20093,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000015,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -20116,21 +20116,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000016,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -20139,21 +20139,21 @@ static const u32 rtw8822c_rf_a[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x000008C8,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x000008CB,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x000008CE,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x000008D1,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x000008D4,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000DD1,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0xA0000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000487,
@@ -38484,21 +38484,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000002,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38507,21 +38507,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000003,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38530,21 +38530,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000004,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38553,21 +38553,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000005,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38576,21 +38576,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000006,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38599,21 +38599,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000015,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38622,21 +38622,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x93000016,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38645,21 +38645,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000001,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38668,21 +38668,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000002,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38691,21 +38691,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000003,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38714,21 +38714,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000004,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38737,21 +38737,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000005,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38760,21 +38760,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000006,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38783,21 +38783,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000015,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38806,21 +38806,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x94000016,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38829,21 +38829,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000001,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38852,21 +38852,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000002,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38875,21 +38875,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000003,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38898,21 +38898,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000004,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38921,21 +38921,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000005,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38944,21 +38944,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000006,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38967,21 +38967,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000015,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -38990,21 +38990,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0x95000016,     0x00000000,     0x40000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000467,
@@ -39013,21 +39013,21 @@ static const u32 rtw8822c_rf_b[] = {
                0x033, 0x00000062,
                0x03F, 0x00000908,
                0x033, 0x00000063,
-               0x03F, 0x00000D09,
+               0x03F, 0x00000CC6,
                0x033, 0x00000064,
-               0x03F, 0x00000D49,
+               0x03F, 0x00000CC9,
                0x033, 0x00000065,
-               0x03F, 0x00000D8A,
+               0x03F, 0x00000CCC,
                0x033, 0x00000066,
-               0x03F, 0x00000DEB,
+               0x03F, 0x00000CCF,
                0x033, 0x00000067,
-               0x03F, 0x00000DEE,
+               0x03F, 0x00000CD2,
                0x033, 0x00000068,
-               0x03F, 0x00000DF1,
+               0x03F, 0x00000CD5,
                0x033, 0x00000069,
-               0x03F, 0x00000DF4,
+               0x03F, 0x00000DD4,
                0x033, 0x0000006A,
-               0x03F, 0x00000DF7,
+               0x03F, 0x00000DD7,
        0xA0000000,     0x00000000,
                0x033, 0x00000060,
                0x03F, 0x00000487,
index 249b3f1..de93843 100644
@@ -38,6 +38,24 @@ config MHI_WWAN_CTRL
          To compile this driver as a module, choose M here: the module will be
          called mhi_wwan_ctrl.
 
+config RPMSG_WWAN_CTRL
+       tristate "RPMSG WWAN control driver"
+       depends on RPMSG
+       help
+         RPMSG WWAN CTRL allows modems available via RPMSG channels to expose
+         different modem protocols/ports to userspace, including AT and QMI.
+         These protocols can be accessed directly from userspace
+         (e.g. AT commands) or via libraries/tools (e.g. libqmi, libqcdm...).
+
+         This is mainly used for modems integrated into many Qualcomm SoCs,
+         e.g. for AT and QMI on Qualcomm MSM8916 or MSM8974. Note that many
+         newer Qualcomm SoCs (e.g. SDM845) still provide an AT port through
+         this driver but the QMI messages can only be sent through
+         QRTR network sockets (CONFIG_QRTR).
+
+         To compile this driver as a module, choose M here: the module will be
+         called rpmsg_wwan_ctrl.
+
 config IOSM
        tristate "IOSM Driver for Intel M.2 WWAN Device"
        depends on INTEL_IOMMU
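The control ports added by this new Kconfig entry surface as character devices managed by the WWAN core. As a rough illustration of the userspace access described in the help text above, a minimal C program could issue an AT command through such a port; this is a sketch only, and the device node name /dev/wwan0at0 is an assumption that depends on probe order:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd = open("/dev/wwan0at0", O_RDWR);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "AT\r", 3) != 3) {	/* basic liveness probe */
		perror("write");
		close(fd);
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);	/* expect e.g. "OK" */
	if (n > 0) {
		buf[n] = '\0';
		printf("%s\n", buf);
	}
	close(fd);
	return 0;
}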
index 83dd348..d90ac33 100644
@@ -9,4 +9,5 @@ wwan-objs += wwan_core.o
 obj-$(CONFIG_WWAN_HWSIM) += wwan_hwsim.o
 
 obj-$(CONFIG_MHI_WWAN_CTRL) += mhi_wwan_ctrl.o
+obj-$(CONFIG_RPMSG_WWAN_CTRL) += rpmsg_wwan_ctrl.o
 obj-$(CONFIG_IOSM) += iosm/
index 84087cf..fd356da 100644
@@ -30,6 +30,9 @@
 #define IP_MUX_SESSION_START 1
 #define IP_MUX_SESSION_END 8
 
+/* Default IP MUX channel */
+#define IP_MUX_SESSION_DEFAULT 1
+
 /**
  * ipc_imem_sys_port_open - Open a port link to CP.
  * @ipc_imem:  Imem instance.
index fbf3cab..e634ffc 100644
@@ -477,7 +477,7 @@ static void ipc_mux_ul_adgh_finish(struct iosm_mux *ipc_mux)
        long long bytes;
        char *str;
 
-       if (!ul_adb || !ul_adb->dest_skb) {
+       if (!ul_adb->dest_skb) {
                dev_err(ipc_mux->dev, "no dest skb");
                return;
        }
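The NULL test dropped above looks like the kind of check static analysis flags: ul_adb is the address of a member embedded in the mux context, so it can never be NULL and only dest_skb needs checking. A minimal sketch of that relationship, with struct and field names partly assumed rather than quoted from the iosm sources:

#include <linux/skbuff.h>

struct mux_adb {
	struct sk_buff *dest_skb;
	/* ... */
};

struct iosm_mux {
	struct mux_adb ul_adb;	/* embedded object, not a pointer */
	/* ... */
};

static void ul_adgh_finish_sketch(struct iosm_mux *ipc_mux)
{
	struct mux_adb *ul_adb = &ipc_mux->ul_adb;	/* never NULL */

	if (!ul_adb->dest_skb)	/* the only check that can fail */
		return;
	/* ... */
}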
index 1711b79..c999c64 100644
@@ -20,7 +20,7 @@
 #define IOSM_IF_ID_PAYLOAD 2
 
 /**
- * struct iosm_netdev_priv - netdev private data
+ * struct iosm_netdev_priv - netdev WWAN driver specific private data
  * @ipc_wwan:  Pointer to iosm_wwan struct
  * @netdev:    Pointer to network interface device structure
  * @if_id:     Interface id for device.
@@ -51,7 +51,7 @@ struct iosm_wwan {
 /* Bring-up the wwan net link */
 static int ipc_wwan_link_open(struct net_device *netdev)
 {
-       struct iosm_netdev_priv *priv = netdev_priv(netdev);
+       struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
        struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
        int if_id = priv->if_id;
        int ret;
@@ -88,7 +88,7 @@ out:
 /* Bring-down the wwan net link */
 static int ipc_wwan_link_stop(struct net_device *netdev)
 {
-       struct iosm_netdev_priv *priv = netdev_priv(netdev);
+       struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
 
        netif_stop_queue(netdev);
 
@@ -105,7 +105,7 @@ static int ipc_wwan_link_stop(struct net_device *netdev)
 static int ipc_wwan_link_transmit(struct sk_buff *skb,
                                  struct net_device *netdev)
 {
-       struct iosm_netdev_priv *priv = netdev_priv(netdev);
+       struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
        struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
        int if_id = priv->if_id;
        int ret;
@@ -178,7 +178,7 @@ static int ipc_wwan_newlink(void *ctxt, struct net_device *dev,
            if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
                return -EINVAL;
 
-       priv = netdev_priv(dev);
+       priv = wwan_netdev_drvpriv(dev);
        priv->if_id = if_id;
        priv->netdev = dev;
        priv->ipc_wwan = ipc_wwan;
@@ -208,8 +208,8 @@ out_unlock:
 static void ipc_wwan_dellink(void *ctxt, struct net_device *dev,
                             struct list_head *head)
 {
+       struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(dev);
        struct iosm_wwan *ipc_wwan = ctxt;
-       struct iosm_netdev_priv *priv = netdev_priv(dev);
        int if_id = priv->if_id;
 
        if (WARN_ON(if_id < IP_MUX_SESSION_START ||
@@ -317,7 +317,9 @@ struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev)
        ipc_wwan->dev = dev;
        ipc_wwan->ipc_imem = ipc_imem;
 
-       if (wwan_register_ops(ipc_wwan->dev, &iosm_wwan_ops, ipc_wwan)) {
+       /* WWAN core will create a netdev for the default IP MUX channel */
+       if (wwan_register_ops(ipc_wwan->dev, &iosm_wwan_ops, ipc_wwan,
+                             IP_MUX_SESSION_DEFAULT)) {
                kfree(ipc_wwan);
                return NULL;
        }
@@ -329,22 +331,9 @@ struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev)
 
 void ipc_wwan_deinit(struct iosm_wwan *ipc_wwan)
 {
-       int if_id;
-
+       /* This call will remove all child netdev(s) */
        wwan_unregister_ops(ipc_wwan->dev);
 
-       for (if_id = 0; if_id < ARRAY_SIZE(ipc_wwan->sub_netlist); if_id++) {
-               struct iosm_netdev_priv *priv;
-
-               priv = rcu_access_pointer(ipc_wwan->sub_netlist[if_id]);
-               if (!priv)
-                       continue;
-
-               rtnl_lock();
-               ipc_wwan_dellink(ipc_wwan, priv->netdev, NULL);
-               rtnl_unlock();
-       }
-
        mutex_destroy(&ipc_wwan->if_mutex);
 
        kfree(ipc_wwan);
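Two related API changes run through the iosm hunks above: wwan_register_ops() now takes a default link id (IP_MUX_SESSION_DEFAULT), so the WWAN core creates the default netdev and removes all child netdevs in wwan_unregister_ops(), and drivers fetch their per-netdev state through wwan_netdev_drvpriv() because the core keeps its own context at the head of the netdev private area. A minimal reconstruction of how such an accessor can be laid out (a sketch, not quoted from this patch):

#include <linux/netdevice.h>

/* Core-owned per-link context placed first in the netdev private area. */
struct wwan_netdev_priv {
	u32 link_id;

	/* Must remain last; driver private data starts here. */
	u8 drv_priv[] __aligned(NETDEV_ALIGN);
};

static inline void *wwan_netdev_drvpriv(struct net_device *dev)
{
	return ((struct wwan_netdev_priv *)netdev_priv(dev))->drv_priv;
}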
diff --git a/drivers/net/wwan/rpmsg_wwan_ctrl.c b/drivers/net/wwan/rpmsg_wwan_ctrl.c
new file mode 100644
index 0000000..31c2442
--- /dev/null
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2021, Stephan Gerhold <stephan@gerhold.net> */
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rpmsg.h>
+#include <linux/wwan.h>
+
+struct rpmsg_wwan_dev {
+       /* Lower level is a rpmsg dev, upper level is a wwan port */
+       struct rpmsg_device *rpdev;
+       struct wwan_port *wwan_port;
+       struct rpmsg_endpoint *ept;
+};
+
+static int rpmsg_wwan_ctrl_callback(struct rpmsg_device *rpdev,
+                                   void *buf, int len, void *priv, u32 src)
+{
+       struct rpmsg_wwan_dev *rpwwan = priv;
+       struct sk_buff *skb;
+
+       skb = alloc_skb(len, GFP_ATOMIC);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put_data(skb, buf, len);
+       wwan_port_rx(rpwwan->wwan_port, skb);
+       return 0;
+}
+
+static int rpmsg_wwan_ctrl_start(struct wwan_port *port)
+{
+       struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
+       struct rpmsg_channel_info chinfo = {
+               .src = rpwwan->rpdev->src,
+               .dst = RPMSG_ADDR_ANY,
+       };
+
+       strncpy(chinfo.name, rpwwan->rpdev->id.name, RPMSG_NAME_SIZE);
+       rpwwan->ept = rpmsg_create_ept(rpwwan->rpdev, rpmsg_wwan_ctrl_callback,
+                                      rpwwan, chinfo);
+       if (!rpwwan->ept)
+               return -EREMOTEIO;
+
+       return 0;
+}
+
+static void rpmsg_wwan_ctrl_stop(struct wwan_port *port)
+{
+       struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
+
+       rpmsg_destroy_ept(rpwwan->ept);
+       rpwwan->ept = NULL;
+}
+
+static int rpmsg_wwan_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
+{
+       struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
+       int ret;
+
+       ret = rpmsg_trysend(rpwwan->ept, skb->data, skb->len);
+       if (ret)
+               return ret;
+
+       consume_skb(skb);
+       return 0;
+}
+
+static int rpmsg_wwan_ctrl_tx_blocking(struct wwan_port *port, struct sk_buff *skb)
+{
+       struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
+       int ret;
+
+       ret = rpmsg_send(rpwwan->ept, skb->data, skb->len);
+       if (ret)
+               return ret;
+
+       consume_skb(skb);
+       return 0;
+}
+
+static __poll_t rpmsg_wwan_ctrl_tx_poll(struct wwan_port *port,
+                                       struct file *filp, poll_table *wait)
+{
+       struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
+
+       return rpmsg_poll(rpwwan->ept, filp, wait);
+}
+
+static const struct wwan_port_ops rpmsg_wwan_pops = {
+       .start = rpmsg_wwan_ctrl_start,
+       .stop = rpmsg_wwan_ctrl_stop,
+       .tx = rpmsg_wwan_ctrl_tx,
+       .tx_blocking = rpmsg_wwan_ctrl_tx_blocking,
+       .tx_poll = rpmsg_wwan_ctrl_tx_poll,
+};
+
+static struct device *rpmsg_wwan_find_parent(struct device *dev)
+{
+       /* Select first platform device as parent for the WWAN ports.
+        * On Qualcomm platforms this is usually the platform device that
+        * represents the modem remote processor. This might need to be
+        * adjusted when adding device IDs for other platforms.
+        */
+       for (dev = dev->parent; dev; dev = dev->parent) {
+               if (dev_is_platform(dev))
+                       return dev;
+       }
+       return NULL;
+}
+
+static int rpmsg_wwan_ctrl_probe(struct rpmsg_device *rpdev)
+{
+       struct rpmsg_wwan_dev *rpwwan;
+       struct wwan_port *port;
+       struct device *parent;
+
+       parent = rpmsg_wwan_find_parent(&rpdev->dev);
+       if (!parent)
+               return -ENODEV;
+
+       rpwwan = devm_kzalloc(&rpdev->dev, sizeof(*rpwwan), GFP_KERNEL);
+       if (!rpwwan)
+               return -ENOMEM;
+
+       rpwwan->rpdev = rpdev;
+       dev_set_drvdata(&rpdev->dev, rpwwan);
+
+       /* Register as a wwan port, id.driver_data contains wwan port type */
+       port = wwan_create_port(parent, rpdev->id.driver_data,
+                               &rpmsg_wwan_pops, rpwwan);
+       if (IS_ERR(port))
+               return PTR_ERR(port);
+
+       rpwwan->wwan_port = port;
+
+       return 0;
+}
+
+static void rpmsg_wwan_ctrl_remove(struct rpmsg_device *rpdev)
+{
+       struct rpmsg_wwan_dev *rpwwan = dev_get_drvdata(&rpdev->dev);
+
+       wwan_remove_port(rpwwan->wwan_port);
+}
+
+static const struct rpmsg_device_id rpmsg_wwan_ctrl_id_table[] = {
+       /* RPMSG channels for Qualcomm SoCs with integrated modem */
+       { .name = "DATA5_CNTL", .driver_data = WWAN_PORT_QMI },
+       { .name = "DATA4", .driver_data = WWAN_PORT_AT },
+       {},
+};
+MODULE_DEVICE_TABLE(rpmsg, rpmsg_wwan_ctrl_id_table);
+
+static struct rpmsg_driver rpmsg_wwan_ctrl_driver = {
+       .drv.name = "rpmsg_wwan_ctrl",
+       .id_table = rpmsg_wwan_ctrl_id_table,
+       .probe = rpmsg_wwan_ctrl_probe,
+       .remove = rpmsg_wwan_ctrl_remove,
+};
+module_rpmsg_driver(rpmsg_wwan_ctrl_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("RPMSG WWAN CTRL Driver");
+MODULE_AUTHOR("Stephan Gerhold <stephan@gerhold.net>");
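The id_table is the extension point of this driver: each RPMSG channel name maps to a WWAN port type via driver_data. As a hypothetical example, a diagnostics channel could be exposed as a QCDM port; the channel name "DATA3" below is an illustration only and does not come from this patch:

static const struct rpmsg_device_id rpmsg_wwan_ctrl_id_table[] = {
	/* RPMSG channels for Qualcomm SoCs with integrated modem */
	{ .name = "DATA5_CNTL", .driver_data = WWAN_PORT_QMI },
	{ .name = "DATA4", .driver_data = WWAN_PORT_AT },
	/* Hypothetical: DM/diag channel exposed as a QCDM port. */
	{ .name = "DATA3", .driver_data = WWAN_PORT_QCDM },
	{},
};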
index 7e72804..3e16c31 100644
@@ -500,7 +500,8 @@ static void wwan_port_op_stop(struct wwan_port *port)
        mutex_unlock(&port->ops_lock);
 }
 
-static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb)
+static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb,
+                          bool nonblock)
 {
        int ret;
 
@@ -510,7 +511,10 @@ static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb)
                goto out_unlock;
        }
 
-       ret = port->ops->tx(port, skb);
+       if (nonblock || !port->ops->tx_blocking)
+               ret = port->ops->tx(port, skb);
+       else
+               ret = port->ops->tx_blocking(port, skb);
 
 out_unlock:
        mutex_unlock(&port->ops_lock);
@@ -637,7 +641,7 @@ static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf,
                return -EFAULT;
        }
 
-       ret = wwan_port_op_tx(port, skb);
+       ret = wwan_port_op_tx(port, skb, !!(filp->f_flags & O_NONBLOCK));
        if (ret) {
                kfree_skb(skb);
                return ret;
@@ -653,12 +657,16 @@ static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait)
 
        poll_wait(filp, &port->waitqueue, wait);
 
-       if (!is_write_blocked(port))
+       mutex_lock(&port->ops_lock);
+       if (port->ops && port->ops->tx_poll)
+               mask |= port->ops->tx_poll(port, filp, wait);
+       else if (!is_write_blocked(port))
                mask |= EPOLLOUT | EPOLLWRNORM;
        if (!is_read_blocked(port))
                mask |= EPOLLIN | EPOLLRDNORM;
        if (!port->ops)
                mask |= EPOLLHUP | EPOLLERR;
+       mutex_unlock(&port->ops_lock);
 
        return mask;
 }
@@ -781,77 +789,6 @@ static const struct file_operations wwan_port_fops = {
        .llseek = noop_llseek,
 };
 
-/**
- * wwan_register_ops - register WWAN device ops
- * @parent: Device to use as parent and shared by all WWAN ports and
- *     created netdevs
- * @ops: operations to register
- * @ctxt: context to pass to operations
- *
- * Returns: 0 on success, a negative error code on failure
- */
-int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
-                     void *ctxt)
-{
-       struct wwan_device *wwandev;
-
-       if (WARN_ON(!parent || !ops))
-               return -EINVAL;
-
-       wwandev = wwan_create_dev(parent);
-       if (!wwandev)
-               return -ENOMEM;
-
-       if (WARN_ON(wwandev->ops)) {
-               wwan_remove_dev(wwandev);
-               return -EBUSY;
-       }
-
-       if (!try_module_get(ops->owner)) {
-               wwan_remove_dev(wwandev);
-               return -ENODEV;
-       }
-
-       wwandev->ops = ops;
-       wwandev->ops_ctxt = ctxt;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(wwan_register_ops);
-
-/**
- * wwan_unregister_ops - remove WWAN device ops
- * @parent: Device to use as parent and shared by all WWAN ports and
- *     created netdevs
- */
-void wwan_unregister_ops(struct device *parent)
-{
-       struct wwan_device *wwandev = wwan_dev_get_by_parent(parent);
-       bool has_ops;
-
-       if (WARN_ON(IS_ERR(wwandev)))
-               return;
-
-       has_ops = wwandev->ops;
-
-       /* put the reference obtained by wwan_dev_get_by_parent(),
-        * we should still have one (that the owner is giving back
-        * now) due to the ops being assigned, check that below
-        * and return if not.
-        */
-       put_device(&wwandev->dev);
-
-       if (WARN_ON(!has_ops))
-               return;
-
-       module_put(wwandev->ops->owner);
-
-       wwandev->ops = NULL;
-       wwandev->ops_ctxt = NULL;
-       wwan_remove_dev(wwandev);
-}
-EXPORT_SYMBOL_GPL(wwan_unregister_ops);
-
 static int wwan_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
                              struct netlink_ext_ack *extack)
 {
@@ -878,6 +815,7 @@ static struct net_device *wwan_rtnl_alloc(struct nlattr *tb[],
        const char *devname = nla_data(tb[IFLA_PARENT_DEV_NAME]);
        struct wwan_device *wwandev = wwan_dev_get_by_name(devname);
        struct net_device *dev;
+       unsigned int priv_size;
 
        if (IS_ERR(wwandev))
                return ERR_CAST(wwandev);
@@ -888,7 +826,8 @@ static struct net_device *wwan_rtnl_alloc(struct nlattr *tb[],
                goto out;
        }
 
-       dev = alloc_netdev_mqs(wwandev->ops->priv_size, ifname, name_assign_type,
+       priv_size = sizeof(struct wwan_netdev_priv) + wwandev->ops->priv_size;
+       dev = alloc_netdev_mqs(priv_size, ifname, name_assign_type,
                               wwandev->ops->setup, num_tx_queues, num_rx_queues);
 
        if (dev) {
@@ -908,6 +847,7 @@ static int wwan_rtnl_newlink(struct net *src_net, struct net_device *dev,
 {
        struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent);
        u32 link_id = nla_get_u32(data[IFLA_WWAN_LINK_ID]);
+       struct wwan_netdev_priv *priv = netdev_priv(dev);
        int ret;
 
        if (IS_ERR(wwandev))
@@ -919,6 +859,7 @@ static int wwan_rtnl_newlink(struct net *src_net, struct net_device *dev,
                goto out;
        }
 
+       priv->link_id = link_id;
        if (wwandev->ops->newlink)
                ret = wwandev->ops->newlink(wwandev->ops_ctxt, dev,
                                            link_id, extack);
@@ -945,13 +886,34 @@ static void wwan_rtnl_dellink(struct net_device *dev, struct list_head *head)
        if (wwandev->ops->dellink)
                wwandev->ops->dellink(wwandev->ops_ctxt, dev, head);
        else
-               unregister_netdevice(dev);
+               unregister_netdevice_queue(dev, head);
 
 out:
        /* release the reference */
        put_device(&wwandev->dev);
 }
 
+static size_t wwan_rtnl_get_size(const struct net_device *dev)
+{
+       return
+               nla_total_size(4) +     /* IFLA_WWAN_LINK_ID */
+               0;
+}
+
+static int wwan_rtnl_fill_info(struct sk_buff *skb,
+                              const struct net_device *dev)
+{
+       struct wwan_netdev_priv *priv = netdev_priv(dev);
+
+       if (nla_put_u32(skb, IFLA_WWAN_LINK_ID, priv->link_id))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
 static const struct nla_policy wwan_rtnl_policy[IFLA_WWAN_MAX + 1] = {
        [IFLA_WWAN_LINK_ID] = { .type = NLA_U32 },
 };
@@ -963,9 +925,167 @@ static struct rtnl_link_ops wwan_rtnl_link_ops __read_mostly = {
        .validate = wwan_rtnl_validate,
        .newlink = wwan_rtnl_newlink,
        .dellink = wwan_rtnl_dellink,
+       .get_size = wwan_rtnl_get_size,
+       .fill_info = wwan_rtnl_fill_info,
        .policy = wwan_rtnl_policy,
 };
 
+static void wwan_create_default_link(struct wwan_device *wwandev,
+                                    u32 def_link_id)
+{
+       struct nlattr *tb[IFLA_MAX + 1], *linkinfo[IFLA_INFO_MAX + 1];
+       struct nlattr *data[IFLA_WWAN_MAX + 1];
+       struct net_device *dev;
+       struct nlmsghdr *nlh;
+       struct sk_buff *msg;
+
+       /* Forge the attributes required to create a WWAN netdev. We first
+        * build a netlink message and then parse it. This looks odd, but
+        * such an approach is less error-prone.
+        */
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (WARN_ON(!msg))
+               return;
+       nlh = nlmsg_put(msg, 0, 0, RTM_NEWLINK, 0, 0);
+       if (WARN_ON(!nlh))
+               goto free_attrs;
+
+       if (nla_put_string(msg, IFLA_PARENT_DEV_NAME, dev_name(&wwandev->dev)))
+               goto free_attrs;
+       tb[IFLA_LINKINFO] = nla_nest_start(msg, IFLA_LINKINFO);
+       if (!tb[IFLA_LINKINFO])
+               goto free_attrs;
+       linkinfo[IFLA_INFO_DATA] = nla_nest_start(msg, IFLA_INFO_DATA);
+       if (!linkinfo[IFLA_INFO_DATA])
+               goto free_attrs;
+       if (nla_put_u32(msg, IFLA_WWAN_LINK_ID, def_link_id))
+               goto free_attrs;
+       nla_nest_end(msg, linkinfo[IFLA_INFO_DATA]);
+       nla_nest_end(msg, tb[IFLA_LINKINFO]);
+
+       nlmsg_end(msg, nlh);
+
+       /* The next three parsing calls cannot fail */
+       nlmsg_parse_deprecated(nlh, 0, tb, IFLA_MAX, NULL, NULL);
+       nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, tb[IFLA_LINKINFO],
+                                   NULL, NULL);
+       nla_parse_nested_deprecated(data, IFLA_WWAN_MAX,
+                                   linkinfo[IFLA_INFO_DATA], NULL, NULL);
+
+       rtnl_lock();
+
+       dev = rtnl_create_link(&init_net, "wwan%d", NET_NAME_ENUM,
+                              &wwan_rtnl_link_ops, tb, NULL);
+       if (WARN_ON(IS_ERR(dev)))
+               goto unlock;
+
+       if (WARN_ON(wwan_rtnl_newlink(&init_net, dev, tb, data, NULL))) {
+               free_netdev(dev);
+               goto unlock;
+       }
+
+unlock:
+       rtnl_unlock();
+
+free_attrs:
+       nlmsg_free(msg);
+}
+
+/**
+ * wwan_register_ops - register WWAN device ops
+ * @parent: Device to use as parent and shared by all WWAN ports and
+ *     created netdevs
+ * @ops: operations to register
+ * @ctxt: context to pass to operations
+ * @def_link_id: id of the default link that will be automatically created by
+ *     the WWAN core for the WWAN device. The default link will not be created
+ *     if the passed value is WWAN_NO_DEFAULT_LINK.
+ *
+ * Returns: 0 on success, a negative error code on failure
+ */
+int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
+                     void *ctxt, u32 def_link_id)
+{
+       struct wwan_device *wwandev;
+
+       if (WARN_ON(!parent || !ops || !ops->setup))
+               return -EINVAL;
+
+       wwandev = wwan_create_dev(parent);
+       if (!wwandev)
+               return -ENOMEM;
+
+       if (WARN_ON(wwandev->ops)) {
+               wwan_remove_dev(wwandev);
+               return -EBUSY;
+       }
+
+       wwandev->ops = ops;
+       wwandev->ops_ctxt = ctxt;
+
+       /* NB: we do not abort ops registration if default link creation
+        * fails. The link ops are the management interface, while default
+        * link creation is a convenience; its failure should not prevent
+        * a user from manually creating a link later.
+        */
+       if (def_link_id != WWAN_NO_DEFAULT_LINK)
+               wwan_create_default_link(wwandev, def_link_id);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(wwan_register_ops);
+
+/* Enqueue child netdev deletion */
+static int wwan_child_dellink(struct device *dev, void *data)
+{
+       struct list_head *kill_list = data;
+
+       if (dev->type == &wwan_type)
+               wwan_rtnl_dellink(to_net_dev(dev), kill_list);
+
+       return 0;
+}
+
+/**
+ * wwan_unregister_ops - remove WWAN device ops
+ * @parent: Device to use as parent and shared by all WWAN ports and
+ *     created netdevs
+ */
+void wwan_unregister_ops(struct device *parent)
+{
+       struct wwan_device *wwandev = wwan_dev_get_by_parent(parent);
+       LIST_HEAD(kill_list);
+
+       if (WARN_ON(IS_ERR(wwandev)))
+               return;
+       if (WARN_ON(!wwandev->ops)) {
+               put_device(&wwandev->dev);
+               return;
+       }
+
+       /* Put the reference obtained by wwan_dev_get_by_parent(); we
+        * should still have one (the one the owner is giving back now)
+        * due to the ops being assigned.
+        */
+       put_device(&wwandev->dev);
+
+       rtnl_lock();    /* Prevent concurrent netdev creation/destruction */
+
+       /* Remove all child netdevs in one batch */
+       device_for_each_child(&wwandev->dev, &kill_list,
+                             wwan_child_dellink);
+       unregister_netdevice_many(&kill_list);
+
+       wwandev->ops = NULL;    /* Finally remove ops */
+
+       rtnl_unlock();
+
+       wwandev->ops_ctxt = NULL;
+       wwan_remove_dev(wwandev);
+}
+EXPORT_SYMBOL_GPL(wwan_unregister_ops);
+
 static int __init wwan_init(void)
 {
        int err;
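A minimal driver-side sketch of the updated registration contract. All names here are hypothetical; only wwan_register_ops()/wwan_unregister_ops() and WWAN_NO_DEFAULT_LINK come from the code above.

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/wwan.h>

struct my_link_priv {
	int dummy;	/* driver-private area behind struct wwan_netdev_priv */
};

static void my_wwan_setup(struct net_device *dev)
{
	dev->type = ARPHRD_NONE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
}

static const struct wwan_ops my_wwan_ops = {
	.priv_size = sizeof(struct my_link_priv),
	.setup = my_wwan_setup,		/* now mandatory, see the WARN_ON above */
};

static int my_modem_probe(struct device *parent, void *ctxt)
{
	/* Ask the core to create a default "wwan%d" link with ID 0;
	 * pass WWAN_NO_DEFAULT_LINK to skip default link creation.
	 */
	return wwan_register_ops(parent, &my_wwan_ops, ctxt, 0);
}

static void my_modem_remove(struct device *parent)
{
	/* Batch-removes all child netdevs under rtnl_lock */
	wwan_unregister_ops(parent);
}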
index 472cae5..5b62cf3 100644 (file)
 #include <linux/spinlock.h>
 #include <linux/list.h>
 #include <linux/skbuff.h>
+#include <linux/netdevice.h>
 #include <linux/wwan.h>
 #include <linux/debugfs.h>
 #include <linux/workqueue.h>
 
+#include <net/arp.h>
+
 static int wwan_hwsim_devsnum = 2;
 module_param_named(devices, wwan_hwsim_devsnum, int, 0444);
 MODULE_PARM_DESC(devices, "Number of simulated devices");
@@ -64,6 +67,37 @@ static const struct file_operations wwan_hwsim_debugfs_devdestroy_fops;
 static void wwan_hwsim_port_del_work(struct work_struct *work);
 static void wwan_hwsim_dev_del_work(struct work_struct *work);
 
+static netdev_tx_t wwan_hwsim_netdev_xmit(struct sk_buff *skb,
+                                         struct net_device *ndev)
+{
+       ndev->stats.tx_packets++;
+       ndev->stats.tx_bytes += skb->len;
+       consume_skb(skb);
+       return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops wwan_hwsim_netdev_ops = {
+       .ndo_start_xmit = wwan_hwsim_netdev_xmit,
+};
+
+static void wwan_hwsim_netdev_setup(struct net_device *ndev)
+{
+       ndev->netdev_ops = &wwan_hwsim_netdev_ops;
+       ndev->needs_free_netdev = true;
+
+       ndev->mtu = ETH_DATA_LEN;
+       ndev->min_mtu = ETH_MIN_MTU;
+       ndev->max_mtu = ETH_MAX_MTU;
+
+       ndev->type = ARPHRD_NONE;
+       ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
+}
+
+static const struct wwan_ops wwan_hwsim_wwan_rtnl_ops = {
+       .priv_size = 0,                 /* No private data */
+       .setup = wwan_hwsim_netdev_setup,
+};
+
 static int wwan_hwsim_port_start(struct wwan_port *wport)
 {
        struct wwan_hwsim_port *port = wwan_port_get_drvdata(wport);
@@ -254,6 +288,10 @@ static struct wwan_hwsim_dev *wwan_hwsim_dev_new(void)
 
        INIT_WORK(&dev->del_work, wwan_hwsim_dev_del_work);
 
+       err = wwan_register_ops(&dev->dev, &wwan_hwsim_wwan_rtnl_ops, dev, 1);
+       if (err)
+               goto err_unreg_dev;
+
        dev->debugfs_topdir = debugfs_create_dir(dev_name(&dev->dev),
                                                 wwan_hwsim_debugfs_topdir);
        debugfs_create_file("destroy", 0200, dev->debugfs_topdir, dev,
@@ -265,6 +303,12 @@ static struct wwan_hwsim_dev *wwan_hwsim_dev_new(void)
 
        return dev;
 
+err_unreg_dev:
+       device_unregister(&dev->dev);
+       /* Memory will be freed in the device release callback */
+
+       return ERR_PTR(err);
+
 err_free_dev:
        kfree(dev);
 
@@ -290,6 +334,9 @@ static void wwan_hwsim_dev_del(struct wwan_hwsim_dev *dev)
 
        debugfs_remove(dev->debugfs_topdir);
 
+       /* This will remove all child netdevs */
+       wwan_unregister_ops(&dev->dev);
+
        /* Make sure that there is no pending deletion work */
        if (current_work() != &dev->del_work)
                cancel_work_sync(&dev->del_work);
index 193b723..c58996c 100644 (file)
@@ -684,6 +684,7 @@ static void xenvif_disconnect_queue(struct xenvif_queue *queue)
 {
        if (queue->task) {
                kthread_stop(queue->task);
+               put_task_struct(queue->task);
                queue->task = NULL;
        }
 
@@ -745,6 +746,11 @@ int xenvif_connect_data(struct xenvif_queue *queue,
        if (IS_ERR(task))
                goto kthread_err;
        queue->task = task;
+       /*
+        * Take a reference to the task in order to prevent it from being freed
+        * if the thread function returns before kthread_stop is called.
+        */
+       get_task_struct(task);
 
        task = kthread_run(xenvif_dealloc_kthread, queue,
                           "%s-dealloc", queue->name);
index a0ce95a..2b0c723 100644 (file)
@@ -70,21 +70,16 @@ static int nxp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
        struct nxp_nci_info *info = nci_get_drvdata(ndev);
        int r;
 
-       if (!info->phy_ops->write) {
-               r = -ENOTSUPP;
-               goto send_exit;
-       }
+       if (!info->phy_ops->write)
+               return -EOPNOTSUPP;
 
-       if (info->mode != NXP_NCI_MODE_NCI) {
-               r = -EINVAL;
-               goto send_exit;
-       }
+       if (info->mode != NXP_NCI_MODE_NCI)
+               return -EINVAL;
 
        r = info->phy_ops->write(info->phy_id, skb);
        if (r < 0)
                kfree_skb(skb);
 
-send_exit:
        return r;
 }
 
@@ -104,10 +99,8 @@ int nxp_nci_probe(void *phy_id, struct device *pdev,
        int r;
 
        info = devm_kzalloc(pdev, sizeof(struct nxp_nci_info), GFP_KERNEL);
-       if (!info) {
-               r = -ENOMEM;
-               goto probe_exit;
-       }
+       if (!info)
+               return -ENOMEM;
 
        info->phy_id = phy_id;
        info->pdev = pdev;
@@ -120,31 +113,25 @@ int nxp_nci_probe(void *phy_id, struct device *pdev,
        if (info->phy_ops->set_mode) {
                r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD);
                if (r < 0)
-                       goto probe_exit;
+                       return r;
        }
 
        info->mode = NXP_NCI_MODE_COLD;
 
        info->ndev = nci_allocate_device(&nxp_nci_ops, NXP_NCI_NFC_PROTOCOLS,
                                         NXP_NCI_HDR_LEN, 0);
-       if (!info->ndev) {
-               r = -ENOMEM;
-               goto probe_exit;
-       }
+       if (!info->ndev)
+               return -ENOMEM;
 
        nci_set_parent_dev(info->ndev, pdev);
        nci_set_drvdata(info->ndev, info);
        r = nci_register_device(info->ndev);
-       if (r < 0)
-               goto probe_exit_free_nci;
+       if (r < 0) {
+               nci_free_device(info->ndev);
+               return r;
+       }
 
        *ndev = info->ndev;
-
-       goto probe_exit;
-
-probe_exit_free_nci:
-       nci_free_device(info->ndev);
-probe_exit:
        return r;
 }
 EXPORT_SYMBOL(nxp_nci_probe);
index dae0c80..119bf30 100644 (file)
@@ -95,10 +95,8 @@ static int nxp_nci_fw_send_chunk(struct nxp_nci_info *info)
        int r;
 
        skb = nci_skb_alloc(info->ndev, info->max_payload, GFP_KERNEL);
-       if (!skb) {
-               r = -ENOMEM;
-               goto chunk_exit;
-       }
+       if (!skb)
+               return -ENOMEM;
 
        chunk_len = info->max_payload - NXP_NCI_FW_HDR_LEN - NXP_NCI_FW_CRC_LEN;
        remaining_len = fw_info->frame_size - fw_info->written;
@@ -124,7 +122,6 @@ static int nxp_nci_fw_send_chunk(struct nxp_nci_info *info)
 
        kfree_skb(skb);
 
-chunk_exit:
        return r;
 }
 
index a44d49d..494675a 100644 (file)
@@ -71,7 +71,8 @@ config NVME_FC
 config NVME_TCP
        tristate "NVM Express over Fabrics TCP host driver"
        depends on INET
-       depends on BLK_DEV_NVME
+       depends on BLOCK
+       select NVME_CORE
        select NVME_FABRICS
        select CRYPTO
        select CRYPTO_CRC32C
index 762125f..66973bb 100644 (file)
@@ -3485,8 +3485,10 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
        cdev_init(cdev, fops);
        cdev->owner = owner;
        ret = cdev_device_add(cdev, cdev_device);
-       if (ret)
+       if (ret) {
+               put_device(cdev_device);
                ida_simple_remove(&nvme_ns_chr_minor_ida, minor);
+       }
        return ret;
 }
 
index a2bb7fc..34a84d2 100644 (file)
@@ -336,6 +336,11 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
                        cmd->connect.recfmt);
                break;
 
+       case NVME_SC_HOST_PATH_ERROR:
+               dev_err(ctrl->device,
+                       "Connect command failed: host path error\n");
+               break;
+
        default:
                dev_err(ctrl->device,
                        "Connect command failed, error wo/DNR bit: %d\n",
index 256e877..f183f9f 100644 (file)
@@ -3107,6 +3107,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        if (ctrl->ctrl.icdoff) {
                dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
                                ctrl->ctrl.icdoff);
+               ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto out_disconnect_admin_queue;
        }
 
@@ -3114,6 +3115,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        if (!(ctrl->ctrl.sgls & ((1 << 0) | (1 << 1)))) {
                dev_err(ctrl->ctrl.device,
                        "Mandatory sgls are not supported!\n");
+               ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto out_disconnect_admin_queue;
        }
 
@@ -3280,11 +3282,13 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
        if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
                return;
 
-       if (portptr->port_state == FC_OBJSTATE_ONLINE)
+       if (portptr->port_state == FC_OBJSTATE_ONLINE) {
                dev_info(ctrl->ctrl.device,
                        "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
                        ctrl->cnum, status);
-       else if (time_after_eq(jiffies, rport->dev_loss_end))
+               if (status > 0 && (status & NVME_SC_DNR))
+                       recon = false;
+       } else if (time_after_eq(jiffies, rport->dev_loss_end))
                recon = false;
 
        if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
@@ -3298,12 +3302,17 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 
                queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
        } else {
-               if (portptr->port_state == FC_OBJSTATE_ONLINE)
-                       dev_warn(ctrl->ctrl.device,
-                               "NVME-FC{%d}: Max reconnect attempts (%d) "
-                               "reached.\n",
-                               ctrl->cnum, ctrl->ctrl.nr_reconnects);
-               else
+               if (portptr->port_state == FC_OBJSTATE_ONLINE) {
+                       if (status > 0 && (status & NVME_SC_DNR))
+                               dev_warn(ctrl->ctrl.device,
+                                        "NVME-FC{%d}: reconnect failure\n",
+                                        ctrl->cnum);
+                       else
+                               dev_warn(ctrl->ctrl.device,
+                                        "NVME-FC{%d}: Max reconnect attempts "
+                                        "(%d) reached.\n",
+                                        ctrl->cnum, ctrl->ctrl.nr_reconnects);
+               } else
                        dev_warn(ctrl->ctrl.device,
                                "NVME-FC{%d}: dev_loss_tmo (%d) expired "
                                "while waiting for remoteport connectivity.\n",
index 37943dc..4697a94 100644 (file)
@@ -1320,16 +1320,17 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
                int count)
 {
        struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
-       struct scatterlist *sgl = req->data_sgl.sg_table.sgl;
        struct ib_sge *sge = &req->sge[1];
+       struct scatterlist *sgl;
        u32 len = 0;
        int i;
 
-       for (i = 0; i < count; i++, sgl++, sge++) {
+       for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
                sge->addr = sg_dma_address(sgl);
                sge->length = sg_dma_len(sgl);
                sge->lkey = queue->device->pd->local_dma_lkey;
                len += sge->length;
+               sge++;
        }
 
        sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
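The rationale for this hunk: a scatterlist can be chained across allocation chunks, so advancing with sgl++ may land on a chain-link entry rather than the next data entry, while for_each_sg() follows the chain via sg_next(). A standalone illustration with a hypothetical helper, not taken from this driver:

#include <linux/scatterlist.h>

/* Sum mapped lengths over a possibly chained scatterlist. */
static u32 total_dma_len(struct scatterlist *sgl, int count)
{
	struct scatterlist *sg;
	u32 len = 0;
	int i;

	for_each_sg(sgl, sg, count, i)	/* never advance with sg++ here */
		len += sg_dma_len(sg);

	return len;
}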
index 1853db3..b20b8d0 100644 (file)
@@ -388,10 +388,10 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
 {
        struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
                        struct nvmet_ctrl, ka_work);
-       bool cmd_seen = ctrl->cmd_seen;
+       bool reset_tbkas = ctrl->reset_tbkas;
 
-       ctrl->cmd_seen = false;
-       if (cmd_seen) {
+       ctrl->reset_tbkas = false;
+       if (reset_tbkas) {
                pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
                        ctrl->cntlid);
                schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
@@ -804,6 +804,13 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
        percpu_ref_exit(&sq->ref);
 
        if (ctrl) {
+               /*
+                * The teardown flow may take some time, and the host may not
+                * send us keep-alive during this period, hence reset the
+                * traffic-based keep-alive timer so we don't trigger a
+                * controller teardown as a result of a keep-alive expiration.
+                */
+               ctrl->reset_tbkas = true;
                nvmet_ctrl_put(ctrl);
                sq->ctrl = NULL; /* allows reusing the queue later */
        }
@@ -952,7 +959,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
        }
 
        if (sq->ctrl)
-               sq->ctrl->cmd_seen = true;
+               sq->ctrl->reset_tbkas = true;
 
        return true;
 
@@ -998,19 +1005,23 @@ static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
        return req->transfer_len - req->metadata_len;
 }
 
-static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
+static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
+               struct nvmet_req *req)
 {
-       req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
+       req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
                        nvmet_data_transfer_len(req));
        if (!req->sg)
                goto out_err;
 
        if (req->metadata_len) {
-               req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
+               req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
                                &req->metadata_sg_cnt, req->metadata_len);
                if (!req->metadata_sg)
                        goto out_free_sg;
        }
+
+       req->p2p_dev = p2p_dev;
+
        return 0;
 out_free_sg:
        pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
@@ -1018,25 +1029,19 @@ out_err:
        return -ENOMEM;
 }
 
-static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
+static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
 {
-       if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
-               return false;
-
-       if (req->sq->ctrl && req->sq->qid && req->ns) {
-               req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
-                                                req->ns->nsid);
-               if (req->p2p_dev)
-                       return true;
-       }
-
-       req->p2p_dev = NULL;
-       return false;
+       if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
+           !req->sq->ctrl || !req->sq->qid || !req->ns)
+               return NULL;
+       return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
 }
 
 int nvmet_req_alloc_sgls(struct nvmet_req *req)
 {
-       if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
+       struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
+
+       if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
                return 0;
 
        req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
@@ -1065,6 +1070,7 @@ void nvmet_req_free_sgls(struct nvmet_req *req)
                pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
                if (req->metadata_sg)
                        pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
+               req->p2p_dev = NULL;
        } else {
                sgl_free(req->sg);
                if (req->metadata_sg)
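A compact restatement of the renamed flag's semantics, with hypothetical types and teardown helper (not from nvmet): any event that should count as traffic, whether a seen command or a potentially slow queue teardown, re-arms the traffic-based keep-alive timer instead of letting it tear the controller down.

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_ctrl {
	bool reset_tbkas;
	unsigned int kato;		/* keep-alive timeout, in seconds */
	struct delayed_work ka_work;
};

static void my_ctrl_fatal_error(struct my_ctrl *ctrl) { /* teardown */ }

static void keep_alive_tick(struct my_ctrl *ctrl)
{
	bool reset_tbkas = ctrl->reset_tbkas;

	ctrl->reset_tbkas = false;
	if (reset_tbkas) {
		/* Treat it as traffic: re-arm instead of tearing down */
		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
		return;
	}

	my_ctrl_fatal_error(ctrl);	/* hypothetical teardown path */
}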
index cb30cb9..a5c4a18 100644 (file)
@@ -263,7 +263,8 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
-       clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+       if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
+               return;
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
        blk_cleanup_queue(ctrl->ctrl.fabrics_q);
@@ -299,6 +300,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
                clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
                nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
        }
+       ctrl->ctrl.queue_count = 1;
 }
 
 static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -405,6 +407,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
        return 0;
 
 out_cleanup_queue:
+       clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
 out_cleanup_fabrics_q:
        blk_cleanup_queue(ctrl->ctrl.fabrics_q);
@@ -462,8 +465,10 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
        nvme_loop_shutdown_ctrl(ctrl);
 
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
-               /* state change failure should never happen */
-               WARN_ON_ONCE(1);
+               if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
+                   ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
+                       /* state change failure for non-deleted ctrl? */
+                       WARN_ON_ONCE(1);
                return;
        }
 
index d69a409..53aea9a 100644 (file)
@@ -167,7 +167,7 @@ struct nvmet_ctrl {
        struct nvmet_subsys     *subsys;
        struct nvmet_sq         **sqs;
 
-       bool                    cmd_seen;
+       bool                    reset_tbkas;
 
        struct mutex            lock;
        u64                     cap;
index f9f34f6..d8aceef 100644 (file)
@@ -550,7 +550,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
                 * nvmet_req_init is completed.
                 */
                if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
-                   len && len < cmd->req.port->inline_data_size &&
+                   len && len <= cmd->req.port->inline_data_size &&
                    nvme_is_write(cmd->req.cmd))
                        return;
        }
index eca805c..9e6ce0d 100644 (file)
@@ -18,6 +18,7 @@ obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
 obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
 obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
 obj-$(CONFIG_PCI_MESON) += pci-meson.o
+obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
 obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
 obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
 
@@ -38,6 +39,6 @@ ifdef CONFIG_ACPI
 ifdef CONFIG_PCI_QUIRKS
 obj-$(CONFIG_ARM64) += pcie-al.o
 obj-$(CONFIG_ARM64) += pcie-hisi.o
-obj-$(CONFIG_ARM64) += pcie-tegra194.o
+obj-$(CONFIG_ARM64) += pcie-tegra194-acpi.o
 endif
 endif
diff --git a/drivers/pci/controller/dwc/pcie-tegra194-acpi.c b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
new file mode 100644 (file)
index 0000000..c2de6ed
--- /dev/null
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ACPI quirks for Tegra194 PCIe host controller
+ *
+ * Copyright (C) 2021 NVIDIA Corporation.
+ *
+ * Author: Vidya Sagar <vidyas@nvidia.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+
+#include "pcie-designware.h"
+
+struct tegra194_pcie_ecam {
+       void __iomem *config_base;
+       void __iomem *iatu_base;
+       void __iomem *dbi_base;
+};
+
+static int tegra194_acpi_init(struct pci_config_window *cfg)
+{
+       struct device *dev = cfg->parent;
+       struct tegra194_pcie_ecam *pcie_ecam;
+
+       pcie_ecam = devm_kzalloc(dev, sizeof(*pcie_ecam), GFP_KERNEL);
+       if (!pcie_ecam)
+               return -ENOMEM;
+
+       pcie_ecam->config_base = cfg->win;
+       pcie_ecam->iatu_base = cfg->win + SZ_256K;
+       pcie_ecam->dbi_base = cfg->win + SZ_512K;
+       cfg->priv = pcie_ecam;
+
+       return 0;
+}
+
+static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
+                         u32 val, u32 reg)
+{
+       u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+
+       writel(val, pcie_ecam->iatu_base + offset + reg);
+}
+
+static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
+                                int index, int type, u64 cpu_addr,
+                                u64 pci_addr, u64 size)
+{
+       atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr),
+                     PCIE_ATU_LOWER_BASE);
+       atu_reg_write(pcie_ecam, index, upper_32_bits(cpu_addr),
+                     PCIE_ATU_UPPER_BASE);
+       atu_reg_write(pcie_ecam, index, lower_32_bits(pci_addr),
+                     PCIE_ATU_LOWER_TARGET);
+       atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr + size - 1),
+                     PCIE_ATU_LIMIT);
+       atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
+                     PCIE_ATU_UPPER_TARGET);
+       atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1);
+       atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+}
+
+static void __iomem *tegra194_map_bus(struct pci_bus *bus,
+                                     unsigned int devfn, int where)
+{
+       struct pci_config_window *cfg = bus->sysdata;
+       struct tegra194_pcie_ecam *pcie_ecam = cfg->priv;
+       u32 busdev;
+       int type;
+
+       if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
+               return NULL;
+
+       if (bus->number == cfg->busr.start) {
+               if (PCI_SLOT(devfn) == 0)
+                       return pcie_ecam->dbi_base + where;
+               else
+                       return NULL;
+       }
+
+       busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+                PCIE_ATU_FUNC(PCI_FUNC(devfn));
+
+       if (bus->parent->number == cfg->busr.start) {
+               if (PCI_SLOT(devfn) == 0)
+                       type = PCIE_ATU_TYPE_CFG0;
+               else
+                       return NULL;
+       } else {
+               type = PCIE_ATU_TYPE_CFG1;
+       }
+
+       program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev,
+                            SZ_256K);
+
+       return pcie_ecam->config_base + where;
+}
+
+const struct pci_ecam_ops tegra194_pcie_ops = {
+       .init           = tegra194_acpi_init,
+       .pci_ops        = {
+               .map_bus        = tegra194_map_bus,
+               .read           = pci_generic_config_read,
+               .write          = pci_generic_config_write,
+       }
+};
index bafd2c6..504669e 100644 (file)
@@ -22,8 +22,6 @@
 #include <linux/of_irq.h>
 #include <linux/of_pci.h>
 #include <linux/pci.h>
-#include <linux/pci-acpi.h>
-#include <linux/pci-ecam.h>
 #include <linux/phy/phy.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
@@ -247,24 +245,6 @@ static const unsigned int pcie_gen_freq[] = {
        GEN4_CORE_CLK_FREQ
 };
 
-static const u32 event_cntr_ctrl_offset[] = {
-       0x1d8,
-       0x1a8,
-       0x1a8,
-       0x1a8,
-       0x1c4,
-       0x1d8
-};
-
-static const u32 event_cntr_data_offset[] = {
-       0x1dc,
-       0x1ac,
-       0x1ac,
-       0x1ac,
-       0x1c8,
-       0x1dc
-};
-
 struct tegra_pcie_dw {
        struct device *dev;
        struct resource *appl_res;
@@ -313,104 +293,6 @@ struct tegra_pcie_dw_of_data {
        enum dw_pcie_device_mode mode;
 };
 
-#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
-struct tegra194_pcie_ecam  {
-       void __iomem *config_base;
-       void __iomem *iatu_base;
-       void __iomem *dbi_base;
-};
-
-static int tegra194_acpi_init(struct pci_config_window *cfg)
-{
-       struct device *dev = cfg->parent;
-       struct tegra194_pcie_ecam *pcie_ecam;
-
-       pcie_ecam = devm_kzalloc(dev, sizeof(*pcie_ecam), GFP_KERNEL);
-       if (!pcie_ecam)
-               return -ENOMEM;
-
-       pcie_ecam->config_base = cfg->win;
-       pcie_ecam->iatu_base = cfg->win + SZ_256K;
-       pcie_ecam->dbi_base = cfg->win + SZ_512K;
-       cfg->priv = pcie_ecam;
-
-       return 0;
-}
-
-static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
-                         u32 val, u32 reg)
-{
-       u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
-
-       writel(val, pcie_ecam->iatu_base + offset + reg);
-}
-
-static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
-                                int index, int type, u64 cpu_addr,
-                                u64 pci_addr, u64 size)
-{
-       atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr),
-                     PCIE_ATU_LOWER_BASE);
-       atu_reg_write(pcie_ecam, index, upper_32_bits(cpu_addr),
-                     PCIE_ATU_UPPER_BASE);
-       atu_reg_write(pcie_ecam, index, lower_32_bits(pci_addr),
-                     PCIE_ATU_LOWER_TARGET);
-       atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr + size - 1),
-                     PCIE_ATU_LIMIT);
-       atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
-                     PCIE_ATU_UPPER_TARGET);
-       atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1);
-       atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
-}
-
-static void __iomem *tegra194_map_bus(struct pci_bus *bus,
-                                     unsigned int devfn, int where)
-{
-       struct pci_config_window *cfg = bus->sysdata;
-       struct tegra194_pcie_ecam *pcie_ecam = cfg->priv;
-       u32 busdev;
-       int type;
-
-       if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
-               return NULL;
-
-       if (bus->number == cfg->busr.start) {
-               if (PCI_SLOT(devfn) == 0)
-                       return pcie_ecam->dbi_base + where;
-               else
-                       return NULL;
-       }
-
-       busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
-                PCIE_ATU_FUNC(PCI_FUNC(devfn));
-
-       if (bus->parent->number == cfg->busr.start) {
-               if (PCI_SLOT(devfn) == 0)
-                       type = PCIE_ATU_TYPE_CFG0;
-               else
-                       return NULL;
-       } else {
-               type = PCIE_ATU_TYPE_CFG1;
-       }
-
-       program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev,
-                            SZ_256K);
-
-       return pcie_ecam->config_base + where;
-}
-
-const struct pci_ecam_ops tegra194_pcie_ops = {
-       .init           = tegra194_acpi_init,
-       .pci_ops        = {
-               .map_bus        = tegra194_map_bus,
-               .read           = pci_generic_config_read,
-               .write          = pci_generic_config_write,
-       }
-};
-#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
-
-#ifdef CONFIG_PCIE_TEGRA194
-
 static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
 {
        return container_of(pci, struct tegra_pcie_dw, pci);
@@ -694,6 +576,24 @@ static struct pci_ops tegra_pci_ops = {
 };
 
 #if defined(CONFIG_PCIEASPM)
+static const u32 event_cntr_ctrl_offset[] = {
+       0x1d8,
+       0x1a8,
+       0x1a8,
+       0x1a8,
+       0x1c4,
+       0x1d8
+};
+
+static const u32 event_cntr_data_offset[] = {
+       0x1dc,
+       0x1ac,
+       0x1ac,
+       0x1ac,
+       0x1c8,
+       0x1dc
+};
+
 static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
 {
        u32 val;
@@ -2411,5 +2311,3 @@ MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
 MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
 MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
 MODULE_LICENSE("GPL v2");
-
-#endif /* CONFIG_PCIE_TEGRA194 */
index 051b48b..e3f5e7a 100644 (file)
@@ -514,7 +514,7 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
                udelay(PIO_RETRY_DELAY);
        }
 
-       dev_err(dev, "config read/write timed out\n");
+       dev_err(dev, "PIO read/write transfer time out\n");
        return -ETIMEDOUT;
 }
 
@@ -657,6 +657,35 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
        return true;
 }
 
+static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
+{
+       struct device *dev = &pcie->pdev->dev;
+
+       /*
+        * Trying to start a new PIO transfer while a previous one has not
+        * completed causes an External Abort on the CPU, which results in a
+        * kernel panic:
+        *
+        *     SError Interrupt on CPU0, code 0xbf000002 -- SError
+        *     Kernel panic - not syncing: Asynchronous SError Interrupt
+        *
+        * The functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are
+        * serialized by raw_spin_lock_irqsave() at the pci_lock_config()
+        * level, so they cannot run concurrently. But because a PIO transfer
+        * may take about 1.5s when the link is down or the card is
+        * disconnected, advk_pcie_wait_pio() may time out before the
+        * transfer completes, leaving it still running.
+        *
+        * Some versions of ARM Trusted Firmware handle this External Abort
+        * at EL3 level and mask it to prevent a kernel panic. Relevant TF-A
+        * commit:
+        * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
+        */
+       if (advk_readl(pcie, PIO_START)) {
+               dev_err(dev, "Previous PIO read/write transfer is still running\n");
+               return true;
+       }
+
+       return false;
+}
+
 static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
                             int where, int size, u32 *val)
 {
@@ -673,9 +702,10 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
                return pci_bridge_emul_conf_read(&pcie->bridge, where,
                                                 size, val);
 
-       /* Start PIO */
-       advk_writel(pcie, 0, PIO_START);
-       advk_writel(pcie, 1, PIO_ISR);
+       if (advk_pcie_pio_is_running(pcie)) {
+               *val = 0xffffffff;
+               return PCIBIOS_SET_FAILED;
+       }
 
        /* Program the control register */
        reg = advk_readl(pcie, PIO_CTRL);
@@ -694,7 +724,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
        /* Program the data strobe */
        advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
 
-       /* Start the transfer */
+       /* Clear PIO DONE ISR and start the transfer */
+       advk_writel(pcie, 1, PIO_ISR);
        advk_writel(pcie, 1, PIO_START);
 
        ret = advk_pcie_wait_pio(pcie);
@@ -734,9 +765,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
        if (where % size)
                return PCIBIOS_SET_FAILED;
 
-       /* Start PIO */
-       advk_writel(pcie, 0, PIO_START);
-       advk_writel(pcie, 1, PIO_ISR);
+       if (advk_pcie_pio_is_running(pcie))
+               return PCIBIOS_SET_FAILED;
 
        /* Program the control register */
        reg = advk_readl(pcie, PIO_CTRL);
@@ -763,7 +793,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
        /* Program the data strobe */
        advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
 
-       /* Start the transfer */
+       /* Clear PIO DONE ISR and start the transfer */
+       advk_writel(pcie, 1, PIO_ISR);
        advk_writel(pcie, 1, PIO_START);
 
        ret = advk_pcie_wait_pio(pcie);
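Condensed, the fixed PIO flow in both config accessors reads as below. This is an illustrative sketch reusing the register names and helpers above; the wrapper function itself is hypothetical.

static int advk_pcie_pio_transfer(struct advk_pcie *pcie)
{
	/* Never touch PIO registers while a transfer is still pending */
	if (advk_readl(pcie, PIO_START))
		return PCIBIOS_SET_FAILED;

	/* ... program PIO_CTRL, address and data strobe registers ... */

	advk_writel(pcie, 1, PIO_ISR);		/* clear stale DONE status */
	advk_writel(pcie, 1, PIO_START);	/* kick the transfer */

	return advk_pcie_wait_pio(pcie);
}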
index da5b414..a143b02 100644 (file)
@@ -103,6 +103,13 @@ struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
 #endif
 }
 
+bool pci_host_of_has_msi_map(struct device *dev)
+{
+       if (dev && dev->of_node)
+               return of_get_property(dev->of_node, "msi-map", NULL);
+       return false;
+}
+
 static inline int __of_pci_pci_compare(struct device_node *node,
                                       unsigned int data)
 {
@@ -346,6 +353,8 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
                                dev_warn(dev, "More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
                                         dev_node);
                        *io_base = range.cpu_addr;
+               } else if (resource_type(res) == IORESOURCE_MEM) {
+                       res->flags &= ~IORESOURCE_MEM_64;
                }
 
                pci_add_resource_offset(resources, res, res->start - range.pci_addr);
index 3a62d09..2752046 100644 (file)
@@ -925,7 +925,8 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
        device_enable_async_suspend(bus->bridge);
        pci_set_bus_of_node(bus);
        pci_set_bus_msi_domain(bus);
-       if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev))
+       if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev) &&
+           !pci_host_of_has_msi_map(parent))
                bus->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
 
        if (!parent)
index dcb229d..22b2bb1 100644 (file)
@@ -3547,6 +3547,18 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
 }
 
 /*
+ * Some NVIDIA GPU devices do not work with bus reset; SBR needs to be
+ * prevented for the affected devices.
+ */
+static void quirk_nvidia_no_bus_reset(struct pci_dev *dev)
+{
+       if ((dev->device & 0xffc0) == 0x2340)
+               quirk_no_bus_reset(dev);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+                        quirk_nvidia_no_bus_reset);
+
+/*
  * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
  * The device will throw a Link Down error on AER-capable systems and
  * regardless of AER, config space of the device is never accessible again
@@ -3566,6 +3578,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
  */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
 
+/*
+ * Some TI KeyStone C667X devices do not support bus/hot reset.  The PCIESS
+ * automatically disables LTSSM when Secondary Bus Reset is received and
+ * the device stops working.  Prevent bus reset for these devices.  With
+ * this change, the device can be assigned to VMs with VFIO, but it will
+ * leak state between VMs.  Reference
+ * https://e2e.ti.com/support/processors/f/791/t/954382
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0xb005, quirk_no_bus_reset);
+
 static void quirk_no_pm_reset(struct pci_dev *dev)
 {
        /*
@@ -3901,6 +3923,69 @@ static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
        return 0;
 }
 
+#define PCI_DEVICE_ID_HINIC_VF      0x375E
+#define HINIC_VF_FLR_TYPE           0x1000
+#define HINIC_VF_FLR_CAP_BIT        (1UL << 30)
+#define HINIC_VF_OP                 0xE80
+#define HINIC_VF_FLR_PROC_BIT       (1UL << 18)
+#define HINIC_OPERATION_TIMEOUT     15000      /* 15 seconds */
+
+/* Device-specific reset method for Huawei Intelligent NIC virtual functions */
+static int reset_hinic_vf_dev(struct pci_dev *pdev, int probe)
+{
+       unsigned long timeout;
+       void __iomem *bar;
+       u32 val;
+
+       if (probe)
+               return 0;
+
+       bar = pci_iomap(pdev, 0, 0);
+       if (!bar)
+               return -ENOTTY;
+
+       /* Get and check firmware capabilities */
+       val = ioread32be(bar + HINIC_VF_FLR_TYPE);
+       if (!(val & HINIC_VF_FLR_CAP_BIT)) {
+               pci_iounmap(pdev, bar);
+               return -ENOTTY;
+       }
+
+       /* Set HINIC_VF_FLR_PROC_BIT for the start of FLR */
+       val = ioread32be(bar + HINIC_VF_OP);
+       val = val | HINIC_VF_FLR_PROC_BIT;
+       iowrite32be(val, bar + HINIC_VF_OP);
+
+       pcie_flr(pdev);
+
+       /*
+        * The device must recapture its Bus and Device Numbers after FLR
+        * in order to generate Completions.  Issue a config write to let the
+        * device capture this information.
+        */
+       pci_write_config_word(pdev, PCI_VENDOR_ID, 0);
+
+       /* Firmware clears HINIC_VF_FLR_PROC_BIT when reset is complete */
+       timeout = jiffies + msecs_to_jiffies(HINIC_OPERATION_TIMEOUT);
+       do {
+               val = ioread32be(bar + HINIC_VF_OP);
+               if (!(val & HINIC_VF_FLR_PROC_BIT))
+                       goto reset_complete;
+               msleep(20);
+       } while (time_before(jiffies, timeout));
+
+       val = ioread32be(bar + HINIC_VF_OP);
+       if (!(val & HINIC_VF_FLR_PROC_BIT))
+               goto reset_complete;
+
+       pci_warn(pdev, "Reset dev timeout, FLR ack reg: %#010x\n", val);
+
+reset_complete:
+       pci_iounmap(pdev, bar);
+
+       return 0;
+}
+
 static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
                 reset_intel_82599_sfp_virtfn },
@@ -3913,6 +3998,8 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
        { PCI_VENDOR_ID_INTEL, 0x0a54, delay_250ms_after_flr },
        { PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
                reset_chelsio_generic_dev },
+       { PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HINIC_VF,
+               reset_hinic_vf_dev },
        { 0 }
 };
 
@@ -4753,6 +4840,8 @@ static const struct pci_dev_acs_enabled {
        { PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
        { PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
        { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
+       /* Broadcom multi-function device */
+       { PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
        { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
        /* Amazon Annapurna Labs */
        { PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
@@ -5154,7 +5243,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
 static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
 {
        if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
-           (pdev->device == 0x7340 && pdev->revision != 0xc5))
+           (pdev->device == 0x7340 && pdev->revision != 0xc5) ||
+           (pdev->device == 0x7341 && pdev->revision != 0x00))
                return;
 
        if (pdev->device == 0x15d8) {
@@ -5181,6 +5271,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
 /* AMD Navi14 dGPU */
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats);
 /* AMD Raven platform iGPU */
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
 #endif /* CONFIG_PCI_ATS */
index 899b9eb..a39f30f 100644 (file)
@@ -78,7 +78,7 @@ static inline u32 brcm_usb_readl(void __iomem *addr)
         * Other architectures (e.g., ARM) either do not support big endian, or
         * else leave I/O in little endian mode.
         */
-       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                return __raw_readl(addr);
        else
                return readl_relaxed(addr);
@@ -87,7 +87,7 @@ static inline u32 brcm_usb_readl(void __iomem *addr)
 static inline void brcm_usb_writel(u32 val, void __iomem *addr)
 {
        /* See brcmnand_readl() comments */
-       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                __raw_writel(val, addr);
        else
                writel_relaxed(val, addr);
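Why the old test could never be true: IS_ENABLED() only evaluates to 1 for macros defined to 1, i.e. Kconfig symbols, while the byte-order headers define __BIG_ENDIAN as 4321 (or leave it undefined on little-endian builds), so IS_ENABLED(__BIG_ENDIAN) is 0 even on big-endian kernels. A small sketch, assuming only standard kernel headers:

#include <linux/build_bug.h>
#include <linux/kconfig.h>
#include <linux/printk.h>
#include <asm/byteorder.h>

static void endianness_demo(void)
{
	/* Always 0: __BIG_ENDIAN is 4321, not a bool-like config symbol */
	BUILD_BUG_ON(IS_ENABLED(__BIG_ENDIAN));

	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		pr_info("built for a big-endian CPU\n");
}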
index 5c68e31..e93818e 100644 (file)
@@ -940,6 +940,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
        sp->nsubnodes = node;
 
        if (sp->num_lanes > SIERRA_MAX_LANES) {
+               ret = -EINVAL;
                dev_err(dev, "Invalid lane configuration\n");
                goto put_child2;
        }
index cdbcc49..731c483 100644 (file)
@@ -949,6 +949,8 @@ static int mtk_phy_init(struct phy *phy)
                break;
        default:
                dev_err(tphy->dev, "incompatible PHY type\n");
+               clk_disable_unprepare(instance->ref_clk);
+               clk_disable_unprepare(instance->da_ref_clk);
                return -EINVAL;
        }
 
index c8a7d09..4076580 100644 (file)
@@ -2470,6 +2470,10 @@ static int sparx5_serdes_probe(struct platform_device *pdev)
        priv->coreclock = clock;
 
        iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!iores) {
+               dev_err(priv->dev, "Invalid resource\n");
+               return -EINVAL;
+       }
        iomem = devm_ioremap(priv->dev, iores->start, resource_size(iores));
        if (IS_ERR(iomem)) {
                dev_err(priv->dev, "Unable to get serdes registers: %s\n",
index 753cb5b..2a9465f 100644 (file)
@@ -341,7 +341,7 @@ static struct platform_driver mt7621_pci_phy_driver = {
        .probe = mt7621_pci_phy_probe,
        .driver = {
                .name = "mt7621-pci-phy",
-               .of_match_table = of_match_ptr(mt7621_pci_phy_ids),
+               .of_match_table = mt7621_pci_phy_ids,
        },
 };
 
index 9eb6d37..126f5b8 100644 (file)
@@ -1212,6 +1212,7 @@ static int wiz_probe(struct platform_device *pdev)
 
                if (wiz->typec_dir_delay < WIZ_TYPEC_DIR_DEBOUNCE_MIN ||
                    wiz->typec_dir_delay > WIZ_TYPEC_DIR_DEBOUNCE_MAX) {
+                       ret = -EINVAL;
                        dev_err(dev, "Invalid typec-dir-debounce property\n");
                        goto err_addr_to_resource;
                }
index 996ebcb..4c0d266 100644 (file)
@@ -2702,8 +2702,8 @@ static int aspeed_g5_sig_expr_eval(struct aspeed_pinmux_data *ctx,
 }
 
 /**
- * Configure a pin's signal by applying an expression's descriptor state for
- * all descriptors in the expression.
+ * aspeed_g5_sig_expr_set() - Configure a pin's signal by applying an
+ * expression's descriptor state for all descriptors in the expression.
  *
  * @ctx: The pinmux context
  * @expr: The expression associated with the function whose signal is to be
index 5c1a109..eeab093 100644 (file)
@@ -2611,8 +2611,8 @@ static struct aspeed_pin_config aspeed_g6_configs[] = {
 };
 
 /**
- * Configure a pin's signal by applying an expression's descriptor state for
- * all descriptors in the expression.
+ * aspeed_g6_sig_expr_set() - Configure a pin's signal by applying an
+ * expression's descriptor state for all descriptors in the expression.
  *
  * @ctx: The pinmux context
  * @expr: The expression associated with the function whose signal is to be
index 9c65d56..9bbfe5c 100644 (file)
@@ -108,7 +108,8 @@ static int aspeed_sig_expr_disable(struct aspeed_pinmux_data *ctx,
 }
 
 /**
- * Disable a signal on a pin by disabling all provided signal expressions.
+ * aspeed_disable_sig() - Disable a signal on a pin by disabling all provided
+ * signal expressions.
  *
  * @ctx: The pinmux context
  * @exprs: The list of signal expressions (from a priority level on a pin)
index 57305ca..894e2ef 100644 (file)
@@ -21,7 +21,8 @@ static inline void aspeed_sig_desc_print_val(
 }
 
 /**
- * Query the enabled or disabled state of a signal descriptor
+ * aspeed_sig_desc_eval() - Query the enabled or disabled state of a signal
+ * descriptor.
  *
  * @desc: The signal descriptor of interest
  * @enabled: True to query the enabled state, false to query disabled state
index 25d2f7f..11e967d 100644 (file)
@@ -223,7 +223,7 @@ config PINCTRL_SC7280
 config PINCTRL_SC8180X
        tristate "Qualcomm Technologies Inc SC8180x pin controller driver"
        depends on GPIOLIB && (OF || ACPI)
-       select PINCTRL_MSM
+       depends on PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
          Qualcomm Technologies Inc TLMM block found on the Qualcomm
index 5aaf57b..0bb4931 100644 (file)
@@ -410,15 +410,15 @@ static const char * const gpio_groups[] = {
        "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
        "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
        "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
-       "gpio50", "gpio51", "gpio52", "gpio52", "gpio53", "gpio53", "gpio54",
-       "gpio55", "gpio56", "gpio57", "gpio58", "gpio59", "gpio60", "gpio61",
-       "gpio62", "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68",
-       "gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74", "gpio75",
-       "gpio76", "gpio77", "gpio78", "gpio79", "gpio80", "gpio81", "gpio82",
-       "gpio83", "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89",
-       "gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96",
-       "gpio97", "gpio98", "gpio99", "gpio100", "gpio101", "gpio102",
-       "gpio103", "gpio104", "gpio105", "gpio106", "gpio107",
+       "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+       "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+       "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+       "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+       "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+       "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+       "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+       "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+       "gpio105", "gpio106", "gpio107",
 };
 
 static const char * const qdss_stm_groups[] = {
index 1f4bca8..a9b511c 100644 (file)
@@ -127,7 +127,7 @@ static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev,
        if (p->groups[group].enabled) {
                dev_err(p->dev, "%s is already enabled\n",
                        p->groups[group].name);
-               return -EBUSY;
+               return 0;
        }
 
        p->groups[group].enabled = 1;
index a9db2f3..b013445 100644 (file)
@@ -683,13 +683,13 @@ static int mlxreg_hotplug_probe(struct platform_device *pdev)
 
        err = devm_request_irq(&pdev->dev, priv->irq,
                               mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
-                              | IRQF_SHARED | IRQF_NO_AUTOEN,
-                              "mlxreg-hotplug", priv);
+                              | IRQF_SHARED, "mlxreg-hotplug", priv);
        if (err) {
                dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
                return err;
        }
 
+       disable_irq(priv->irq);
        spin_lock_init(&priv->lock);
        INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
        dev_set_drvdata(&pdev->dev, priv);
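This hunk reinstates the explicit disable_irq(): IRQF_NO_AUTOEN keeps a line disabled between request and first enable, but it cannot be combined with IRQF_SHARED (the IRQ core rejects the pair), so the shared handler is requested normally and disabled right away instead. A sketch of the two variants, names illustrative:

    /* rejected by request_threaded_irq(): shared lines must auto-enable */
    err = devm_request_irq(dev, irq, handler,
                           IRQF_TRIGGER_FALLING | IRQF_SHARED | IRQF_NO_AUTOEN,
                           "example", priv);

    /* works: request shared, then keep it off until setup finishes */
    err = devm_request_irq(dev, irq, handler,
                           IRQF_TRIGGER_FALLING | IRQF_SHARED, "example", priv);
    if (!err)
            disable_irq(irq);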
index 8a70df6..a06964a 100644 (file)
@@ -1907,7 +1907,7 @@ static int ssam_ssh_event_disable(struct ssam_controller *ctrl,
 {
        int status;
 
-       status = __ssam_ssh_event_request(ctrl, reg, reg.cid_enable, id, flags);
+       status = __ssam_ssh_event_request(ctrl, reg, reg.cid_disable, id, flags);
 
        if (status < 0 && status != -EINVAL) {
                ssam_err(ctrl,
index 685d37a..ef83461 100644 (file)
@@ -156,7 +156,7 @@ static const struct software_node *ssam_node_group_sl2[] = {
        NULL,
 };
 
-/* Devices for Surface Laptop 3. */
+/* Devices for Surface Laptop 3 and 4. */
 static const struct software_node *ssam_node_group_sl3[] = {
        &ssam_node_root,
        &ssam_node_bat_ac,
@@ -521,9 +521,12 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
        /* Surface Laptop 3 (13", Intel) */
        { "MSHW0114", (unsigned long)ssam_node_group_sl3 },
 
-       /* Surface Laptop 3 (15", AMD) */
+       /* Surface Laptop 3 (15", AMD) and 4 (15", AMD) */
        { "MSHW0110", (unsigned long)ssam_node_group_sl3 },
 
+       /* Surface Laptop 4 (13", Intel) */
+       { "MSHW0250", (unsigned long)ssam_node_group_sl3 },
+
        /* Surface Laptop Go 1 */
        { "MSHW0118", (unsigned long)ssam_node_group_slg1 },
 
index 5d9b758..1203b9a 100644 (file)
@@ -427,6 +427,7 @@ static int surface_dtx_open(struct inode *inode, struct file *file)
         */
        if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
                up_write(&ddev->client_lock);
+               mutex_destroy(&client->read_lock);
                sdtx_device_put(client->ddev);
                kfree(client);
                return -ENODEV;
index dd60c93..edd71e7 100644 (file)
@@ -8853,6 +8853,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
        TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL),  /* P1 / X1 Extreme (2nd gen) */
        TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL),  /* P1 / X1 Extreme (3nd gen) */
        TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL),  /* P15 (1st gen) / P15v (1st gen) */
+       TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL),  /* X1 Carbon (9th gen) */
 };
 
 static int __init fan_init(struct ibm_init_struct *iibm)
index a780435..841d890 100644 (file)
@@ -117,7 +117,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
                delta = ktime_to_ns(kt);
                err = ops->adjtime(ops, delta);
        } else if (tx->modes & ADJ_FREQUENCY) {
-               s32 ppb = scaled_ppm_to_ppb(tx->freq);
+               long ppb = scaled_ppm_to_ppb(tx->freq);
                if (ppb > ops->max_adj || ppb < -ops->max_adj)
                        return -ERANGE;
                if (ops->adjfine)
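scaled_ppm_to_ppb() returns long, so storing the result in an s32 truncated it before the max_adj comparison; on 64-bit a sufficiently large tx->freq could wrap into the accepted window and reach the driver unchecked. Roughly (value illustrative):

    long ppb = scaled_ppm_to_ppb(tx->freq); /* say 0x1000003e8 = 2^32 + 1000 */
    s32 bad = ppb;                          /* truncates to 1000: check passes */

    if (ppb > ops->max_adj || ppb < -ops->max_adj) /* full width: caught */
            return -ERANGE;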
index 9d84d92..3e7a385 100644 (file)
@@ -1031,7 +1031,7 @@ config REGULATOR_RT5033
          current source, LDO and Buck.
 
 config REGULATOR_RTMV20
-       tristate "RTMV20 Laser Diode Regulator"
+       tristate "Richtek RTMV20 Laser Diode Regulator"
        depends on I2C
        select REGMAP_I2C
        help
index d8b4299..05147d2 100644 (file)
@@ -28,16 +28,16 @@ static const struct linear_range atc2609a_dcdc_voltage_ranges[] = {
 
 static const struct linear_range atc2609a_ldo_voltage_ranges0[] = {
        REGULATOR_LINEAR_RANGE(700000, 0, 15, 100000),
-       REGULATOR_LINEAR_RANGE(2100000, 16, 28, 100000),
+       REGULATOR_LINEAR_RANGE(2100000, 0, 12, 100000),
 };
 
 static const struct linear_range atc2609a_ldo_voltage_ranges1[] = {
        REGULATOR_LINEAR_RANGE(850000, 0, 15, 100000),
-       REGULATOR_LINEAR_RANGE(2100000, 16, 27, 100000),
+       REGULATOR_LINEAR_RANGE(2100000, 0, 11, 100000),
 };
 
 static const unsigned int atc260x_ldo_voltage_range_sel[] = {
-       0x0, 0x1,
+       0x0, 0x20,
 };
 
 static int atc260x_dcdc_set_voltage_time_sel(struct regulator_dev *rdev,
@@ -411,7 +411,7 @@ enum atc2609a_reg_ids {
        .owner = THIS_MODULE, \
 }
 
-#define atc2609a_reg_desc_ldo_range_pick(num, n_range) { \
+#define atc2609a_reg_desc_ldo_range_pick(num, n_range, n_volt) { \
        .name = "LDO"#num, \
        .supply_name = "ldo"#num, \
        .of_match = of_match_ptr("ldo"#num), \
@@ -421,6 +421,7 @@ enum atc2609a_reg_ids {
        .type = REGULATOR_VOLTAGE, \
        .linear_ranges = atc2609a_ldo_voltage_ranges##n_range, \
        .n_linear_ranges = ARRAY_SIZE(atc2609a_ldo_voltage_ranges##n_range), \
+       .n_voltages = n_volt, \
        .vsel_reg = ATC2609A_PMU_LDO##num##_CTL0, \
        .vsel_mask = GENMASK(4, 1), \
        .vsel_range_reg = ATC2609A_PMU_LDO##num##_CTL0, \
@@ -458,12 +459,12 @@ static const struct regulator_desc atc2609a_reg[] = {
        atc2609a_reg_desc_ldo_bypass(0),
        atc2609a_reg_desc_ldo_bypass(1),
        atc2609a_reg_desc_ldo_bypass(2),
-       atc2609a_reg_desc_ldo_range_pick(3, 0),
-       atc2609a_reg_desc_ldo_range_pick(4, 0),
+       atc2609a_reg_desc_ldo_range_pick(3, 0, 29),
+       atc2609a_reg_desc_ldo_range_pick(4, 0, 29),
        atc2609a_reg_desc_ldo(5),
-       atc2609a_reg_desc_ldo_range_pick(6, 1),
-       atc2609a_reg_desc_ldo_range_pick(7, 0),
-       atc2609a_reg_desc_ldo_range_pick(8, 0),
+       atc2609a_reg_desc_ldo_range_pick(6, 1, 28),
+       atc2609a_reg_desc_ldo_range_pick(7, 0, 29),
+       atc2609a_reg_desc_ldo_range_pick(8, 0, 29),
        atc2609a_reg_desc_ldo_fixed(9),
 };
 
index e61295b..b1eb469 100644 (file)
@@ -334,7 +334,7 @@ BD718XX_OPS(bd71837_buck_regulator_ops, regulator_list_voltage_linear_range,
            NULL);
 
 BD718XX_OPS(bd71837_buck_regulator_nolinear_ops, regulator_list_voltage_table,
-           regulator_map_voltage_ascend, bd718xx_set_voltage_sel_restricted,
+           regulator_map_voltage_ascend, bd71837_set_voltage_sel_restricted,
            regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
            NULL);
 /*
index f192bf1..e20e77e 100644 (file)
@@ -1425,6 +1425,12 @@ static int set_machine_constraints(struct regulator_dev *rdev)
         * and we have control then make sure it is enabled.
         */
        if (rdev->constraints->always_on || rdev->constraints->boot_on) {
+               /* If we want to enable this regulator, make sure that we know
+                * the supplying regulator.
+                */
+               if (rdev->supply_name && !rdev->supply)
+                       return -EPROBE_DEFER;
+
                if (rdev->supply) {
                        ret = regulator_enable(rdev->supply);
                        if (ret < 0) {
index eb3fc1d..c4754f3 100644 (file)
@@ -225,8 +225,9 @@ static int cros_ec_regulator_probe(struct platform_device *pdev)
 
        drvdata->dev = devm_regulator_register(dev, &drvdata->desc, &cfg);
        if (IS_ERR(drvdata->dev)) {
+               ret = PTR_ERR(drvdata->dev);
                dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
-               return PTR_ERR(drvdata->dev);
+               return ret;
        }
 
        platform_set_drvdata(pdev, drvdata);
index 08cbf68..e669250 100644 (file)
@@ -280,7 +280,7 @@ static unsigned int da9121_map_mode(unsigned int mode)
        case DA9121_BUCK_MODE_FORCE_PFM:
                return REGULATOR_MODE_STANDBY;
        default:
-               return -EINVAL;
+               return REGULATOR_MODE_INVALID;
        }
 }
 
@@ -317,7 +317,7 @@ static unsigned int da9121_buck_get_mode(struct regulator_dev *rdev)
 {
        struct da9121 *chip = rdev_get_drvdata(rdev);
        int id = rdev_get_id(rdev);
-       unsigned int val;
+       unsigned int val, mode;
        int ret = 0;
 
        ret = regmap_read(chip->regmap, da9121_mode_field[id].reg, &val);
@@ -326,7 +326,11 @@ static unsigned int da9121_buck_get_mode(struct regulator_dev *rdev)
                return -EINVAL;
        }
 
-       return da9121_map_mode(val & da9121_mode_field[id].msk);
+       mode = da9121_map_mode(val & da9121_mode_field[id].msk);
+       if (mode == REGULATOR_MODE_INVALID)
+               return -EINVAL;
+
+       return mode;
 }
 
 static const struct regulator_ops da9121_buck_ops = {
index f3918f0..26f06f6 100644 (file)
@@ -55,7 +55,6 @@
 
 #define FAN53555_NVOLTAGES     64      /* Numbers of voltages */
 #define FAN53526_NVOLTAGES     128
-#define TCS4525_NVOLTAGES      127     /* Numbers of voltages */
 
 #define TCS_VSEL_NSEL_MASK     0x7f
 #define TCS_VSEL0_MODE         (1 << 7)
@@ -376,7 +375,7 @@ static int fan53555_voltages_setup_tcs(struct fan53555_device_info *di)
        /* Init voltage range and step */
        di->vsel_min = 600000;
        di->vsel_step = 6250;
-       di->vsel_count = TCS4525_NVOLTAGES;
+       di->vsel_count = FAN53526_NVOLTAGES;
 
        return 0;
 }
index e83eb4f..1684faf 100644 (file)
@@ -51,6 +51,7 @@ static const struct regulator_ops fan53880_ops = {
                      REGULATOR_LINEAR_RANGE(800000, 0xf, 0x73, 25000), \
                },                                                      \
                .n_linear_ranges = 2,                                   \
+               .n_voltages =      0x74,                                \
                .vsel_reg =        FAN53880_LDO ## _num ## VOUT,        \
                .vsel_mask =       0x7f,                                \
                .enable_reg =      FAN53880_ENABLE,                     \
@@ -76,6 +77,7 @@ static const struct regulator_desc fan53880_regulators[] = {
                      REGULATOR_LINEAR_RANGE(600000, 0x1f, 0xf7, 12500),
                },
                .n_linear_ranges = 2,
+               .n_voltages =      0xf8,
                .vsel_reg =        FAN53880_BUCKVOUT,
                .vsel_mask =       0x7f,
                .enable_reg =      FAN53880_ENABLE,
@@ -95,6 +97,7 @@ static const struct regulator_desc fan53880_regulators[] = {
                      REGULATOR_LINEAR_RANGE(3000000, 0x4, 0x70, 25000),
                },
                .n_linear_ranges = 2,
+               .n_voltages =      0x71,
                .vsel_reg =        FAN53880_BOOSTVOUT,
                .vsel_mask =       0x7f,
                .enable_reg =      FAN53880_ENABLE_BOOST,
index 02ad831..34e255c 100644 (file)
@@ -88,10 +88,15 @@ static int reg_domain_disable(struct regulator_dev *rdev)
 {
        struct fixed_voltage_data *priv = rdev_get_drvdata(rdev);
        struct device *dev = rdev->dev.parent;
+       int ret;
+
+       ret = dev_pm_genpd_set_performance_state(dev, 0);
+       if (ret)
+               return ret;
 
        priv->enable_counter--;
 
-       return dev_pm_genpd_set_performance_state(dev, 0);
+       return 0;
 }
 
 static int reg_is_enabled(struct regulator_dev *rdev)
index 0e16e31..ad2237a 100644 (file)
@@ -948,7 +948,7 @@ int regulator_set_ramp_delay_regmap(struct regulator_dev *rdev, int ramp_delay)
        int ret;
        unsigned int sel;
 
-       if (!rdev->desc->n_ramp_values)
+       if (WARN_ON(!rdev->desc->n_ramp_values || !rdev->desc->ramp_delay_table))
                return -EINVAL;
 
        ret = find_closest_bigger(ramp_delay, rdev->desc->ramp_delay_table,
index f6a14e9..d6340bb 100644 (file)
@@ -3,7 +3,7 @@
 // Device driver for regulators in Hisi IC
 //
 // Copyright (c) 2013 Linaro Ltd.
-// Copyright (c) 2011 Hisilicon.
+// Copyright (c) 2011 HiSilicon Ltd.
 // Copyright (c) 2020-2021 Huawei Technologies Co., Ltd
 //
 // Guodong Xu <guodong.xu@linaro.org>
@@ -83,7 +83,7 @@ static const unsigned int ldo34_voltages[] = {
                        .owner          = THIS_MODULE,                         \
                        .volt_table     = vtable,                              \
                        .n_voltages     = ARRAY_SIZE(vtable),                  \
-                       .vsel_mask      = (1 << (ARRAY_SIZE(vtable) - 1)) - 1, \
+                       .vsel_mask      = ARRAY_SIZE(vtable) - 1,              \
                        .vsel_reg       = vreg,                                \
                        .enable_reg     = ereg,                                \
                        .enable_mask    = emask,                               \
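The old expression conflated the number of table entries with the number of selector bits. Worked example for an 8-entry voltage table:

    (1 << (8 - 1)) - 1  ==  0x7f   /* old: a 7-bit mask, too wide */
           8 - 1        ==  0x07   /* new: a 3-bit mask, correct  */

Note that ARRAY_SIZE(vtable) - 1 only forms a valid mask when the table size is a power of two, which this fix presumes for these tables.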
index ac2ee20..68cdb17 100644 (file)
@@ -2,7 +2,7 @@
 //
 // Device driver for regulators in Hi655x IC
 //
-// Copyright (c) 2016 Hisilicon.
+// Copyright (c) 2016 HiSilicon Ltd.
 //
 // Authors:
 // Chen Feng <puck.chen@hisilicon.com>
index 8d9731e..3cf8f08 100644 (file)
@@ -814,6 +814,13 @@ static int max77620_regulator_probe(struct platform_device *pdev)
        config.dev = dev;
        config.driver_data = pmic;
 
+       /*
+        * Set of_node_reuse flag to prevent driver core from attempting to
+        * claim any pinmux resources already claimed by the parent device.
+        * Otherwise PMIC driver will fail to re-probe.
+        */
+       device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
+
        for (id = 0; id < MAX77620_NUM_REGS; id++) {
                struct regulator_dev *rdev;
                struct regulator_desc *rdesc;
@@ -839,12 +846,10 @@ static int max77620_regulator_probe(struct platform_device *pdev)
                        return ret;
 
                rdev = devm_regulator_register(dev, rdesc, &config);
-               if (IS_ERR(rdev)) {
-                       ret = PTR_ERR(rdev);
-                       dev_err(dev, "Regulator registration %s failed: %d\n",
-                               rdesc->name, ret);
-                       return ret;
-               }
+               if (IS_ERR(rdev))
+                       return dev_err_probe(dev, PTR_ERR(rdev),
+                                            "Regulator registration %s failed\n",
+                                            rdesc->name);
        }
 
        return 0;
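dev_err_probe(dev, err, fmt, ...) returns err after logging it appropriately: real errors go through dev_err(), while -EPROBE_DEFER is logged at debug level and recorded as the device's deferral reason (readable from debugfs devices_deferred). That is what lets the open-coded branch collapse to:

    rdev = devm_regulator_register(dev, rdesc, &config);
    if (IS_ERR(rdev))
            return dev_err_probe(dev, PTR_ERR(rdev),
                                 "Regulator registration %s failed\n",
                                 rdesc->name);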
index 9edc349..6b8be52 100644 (file)
@@ -59,7 +59,7 @@ static const struct linear_range mt_volt_range1[] = {
        REGULATOR_LINEAR_RANGE(0, 0, 0xbf, 6250),
 };
 
-static unsigned int mt6315_map_mode(u32 mode)
+static unsigned int mt6315_map_mode(unsigned int mode)
 {
        switch (mode) {
        case MT6315_BUCK_MODE_AUTO:
index 2055a9c..7a87788 100644 (file)
@@ -66,7 +66,7 @@ static int rt4801_enable(struct regulator_dev *rdev)
        struct gpio_descs *gpios = priv->enable_gpios;
        int id = rdev_get_id(rdev), ret;
 
-       if (gpios->ndescs <= id) {
+       if (!gpios || gpios->ndescs <= id) {
                dev_warn(&rdev->dev, "no dedicated gpio can control\n");
                goto bypass_gpio;
        }
@@ -88,7 +88,7 @@ static int rt4801_disable(struct regulator_dev *rdev)
        struct gpio_descs *gpios = priv->enable_gpios;
        int id = rdev_get_id(rdev);
 
-       if (gpios->ndescs <= id) {
+       if (!gpios || gpios->ndescs <= id) {
                dev_warn(&rdev->dev, "no dedicated gpio can control\n");
                goto bypass_gpio;
        }
index 852fb25..4bca64d 100644 (file)
@@ -27,6 +27,7 @@
 #define RTMV20_REG_LDIRQ       0x30
 #define RTMV20_REG_LDSTAT      0x40
 #define RTMV20_REG_LDMASK      0x50
+#define RTMV20_MAX_REGS                (RTMV20_REG_LDMASK + 1)
 
 #define RTMV20_VID_MASK                GENMASK(7, 4)
 #define RICHTEK_VID            0x80
@@ -103,9 +104,47 @@ static int rtmv20_lsw_disable(struct regulator_dev *rdev)
        return 0;
 }
 
+static int rtmv20_lsw_set_current_limit(struct regulator_dev *rdev, int min_uA,
+                                       int max_uA)
+{
+       int sel;
+
+       if (min_uA > RTMV20_LSW_MAXUA || max_uA < RTMV20_LSW_MINUA)
+               return -EINVAL;
+
+       if (max_uA > RTMV20_LSW_MAXUA)
+               max_uA = RTMV20_LSW_MAXUA;
+
+       sel = (max_uA - RTMV20_LSW_MINUA) / RTMV20_LSW_STEPUA;
+
+       /* Ensure the selected setting is still in range */
+       if ((sel * RTMV20_LSW_STEPUA + RTMV20_LSW_MINUA) < min_uA)
+               return -EINVAL;
+
+       sel <<= ffs(rdev->desc->csel_mask) - 1;
+
+       return regmap_update_bits(rdev->regmap, rdev->desc->csel_reg,
+                                 rdev->desc->csel_mask, sel);
+}
+
+static int rtmv20_lsw_get_current_limit(struct regulator_dev *rdev)
+{
+       unsigned int val;
+       int ret;
+
+       ret = regmap_read(rdev->regmap, rdev->desc->csel_reg, &val);
+       if (ret)
+               return ret;
+
+       val &= rdev->desc->csel_mask;
+       val >>= ffs(rdev->desc->csel_mask) - 1;
+
+       return val * RTMV20_LSW_STEPUA + RTMV20_LSW_MINUA;
+}
+
 static const struct regulator_ops rtmv20_regulator_ops = {
-       .set_current_limit = regulator_set_current_limit_regmap,
-       .get_current_limit = regulator_get_current_limit_regmap,
+       .set_current_limit = rtmv20_lsw_set_current_limit,
+       .get_current_limit = rtmv20_lsw_get_current_limit,
        .enable = rtmv20_lsw_enable,
        .disable = rtmv20_lsw_disable,
        .is_enabled = regulator_is_enabled_regmap,
@@ -275,6 +314,7 @@ static const struct regmap_config rtmv20_regmap_config = {
        .val_bits = 8,
        .cache_type = REGCACHE_RBTREE,
        .max_register = RTMV20_REG_LDMASK,
+       .num_reg_defaults_raw = RTMV20_MAX_REGS,
 
        .writeable_reg = rtmv20_is_accessible_reg,
        .readable_reg = rtmv20_is_accessible_reg,
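A worked view of the new selector math, in terms of the driver's own MINUA/STEPUA constants: set() rounds the requested maximum down to the nearest step and then verifies the rounded value still clears the minimum:

    sel = (max_uA - RTMV20_LSW_MINUA) / RTMV20_LSW_STEPUA; /* round down   */
    uA  = sel * RTMV20_LSW_STEPUA + RTMV20_LSW_MINUA;      /* actual limit */
    /* if uA < min_uA, no representable step lies in [min_uA, max_uA] */

get() inverts the same mapping after shifting the field out of csel_mask, so the pair round-trips exactly.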
index bbadf72..1f02f60 100644 (file)
@@ -173,7 +173,7 @@ scmi_config_linear_regulator_mappings(struct scmi_regulator *sreg,
                sreg->desc.uV_step =
                        vinfo->levels_uv[SCMI_VOLTAGE_SEGMENT_STEP];
                sreg->desc.linear_min_sel = 0;
-               sreg->desc.n_voltages = delta_uV / sreg->desc.uV_step;
+               sreg->desc.n_voltages = (delta_uV / sreg->desc.uV_step) + 1;
                sreg->desc.ops = &scmi_reg_linear_ops;
        }
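The "+ 1" is the classic fencepost correction: N equal steps span N + 1 selectable levels. For instance, 1.0 V to 1.5 V in 100 mV steps gives delta_uV / uV_step = 500000 / 100000 = 5 steps, but six voltages (1.0, 1.1, 1.2, 1.3, 1.4, 1.5 V), so n_voltages must be 6.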
 
index e5daee4..c1404d3 100644 (file)
@@ -459,8 +459,10 @@ static int rpmsg_dev_match(struct device *dev, struct device_driver *drv)
 
        if (ids)
                for (i = 0; ids[i].name[0]; i++)
-                       if (rpmsg_id_match(rpdev, &ids[i]))
+                       if (rpmsg_id_match(rpdev, &ids[i])) {
+                               rpdev->id.driver_data = ids[i].driver_data;
                                return 1;
+                       }
 
        return of_driver_match_device(dev, drv);
 }
index 1b9e144..fd42a5f 100644 (file)
@@ -642,12 +642,18 @@ static void dasd_diag_setup_blk_queue(struct dasd_block *block)
        blk_queue_segment_boundary(q, PAGE_SIZE - 1);
 }
 
+static int dasd_diag_pe_handler(struct dasd_device *device,
+                               __u8 tbvpm, __u8 fcsecpm)
+{
+       return dasd_generic_verify_path(device, tbvpm);
+}
+
 static struct dasd_discipline dasd_diag_discipline = {
        .owner = THIS_MODULE,
        .name = "DIAG",
        .ebcname = "DIAG",
        .check_device = dasd_diag_check_device,
-       .verify_path = dasd_generic_verify_path,
+       .pe_handler = dasd_diag_pe_handler,
        .fill_geometry = dasd_diag_fill_geometry,
        .setup_blk_queue = dasd_diag_setup_blk_queue,
        .start_IO = dasd_start_diag,
index 4789410..3ad319a 100644 (file)
@@ -794,13 +794,19 @@ static void dasd_fba_setup_blk_queue(struct dasd_block *block)
        blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 }
 
+static int dasd_fba_pe_handler(struct dasd_device *device,
+                              __u8 tbvpm, __u8 fcsecpm)
+{
+       return dasd_generic_verify_path(device, tbvpm);
+}
+
 static struct dasd_discipline dasd_fba_discipline = {
        .owner = THIS_MODULE,
        .name = "FBA ",
        .ebcname = "FBA ",
        .check_device = dasd_fba_check_characteristics,
        .do_analysis = dasd_fba_do_analysis,
-       .verify_path = dasd_generic_verify_path,
+       .pe_handler = dasd_fba_pe_handler,
        .setup_blk_queue = dasd_fba_setup_blk_queue,
        .fill_geometry = dasd_fba_fill_geometry,
        .start_IO = dasd_start_IO,
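The DIAG and FBA disciplines now publish a pe_handler wrapper in place of the removed .verify_path member; each wrapper ignores the fcsecpm argument these disciplines have no use for and forwards to the generic path verification, matching the prototype visible in the dasd_int.h hunk below:

    int (*pe_handler)(struct dasd_device *, __u8 tbvpm, __u8 fcsecpm);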
index 1c59b0e..155428b 100644 (file)
@@ -297,7 +297,6 @@ struct dasd_discipline {
         * e.g. verify that new path is compatible with the current
         * configuration.
         */
-       int (*verify_path)(struct dasd_device *, __u8);
        int (*pe_handler)(struct dasd_device *, __u8, __u8);
 
        /*
index b9febc5..8d1b277 100644 (file)
@@ -638,6 +638,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1);
        int ret;
 
+       /* this is an error in the caller */
+       if (cp->initialized)
+               return -EBUSY;
+
        /*
         * We only support prefetching the channel program. We assume all channel
         * programs executed by supported guests likewise support prefetching.
index 8c625b5..9b61e9b 100644 (file)
@@ -86,6 +86,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
        struct vfio_ccw_private *private;
        struct irb *irb;
        bool is_final;
+       bool cp_is_finished = false;
 
        private = container_of(work, struct vfio_ccw_private, io_work);
        irb = &private->irb;
@@ -94,14 +95,21 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
                     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
        if (scsw_is_solicited(&irb->scsw)) {
                cp_update_scsw(&private->cp, &irb->scsw);
-               if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING)
+               if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) {
                        cp_free(&private->cp);
+                       cp_is_finished = true;
+               }
        }
        mutex_lock(&private->io_mutex);
        memcpy(private->io_region->irb_area, irb, sizeof(*irb));
        mutex_unlock(&private->io_mutex);
 
-       if (private->mdev && is_final)
+       /*
+        * Reset to IDLE only if processing of a channel program
+        * has finished. Do not overwrite a possible processing
+        * state if the final interrupt was for HSCH or CSCH.
+        */
+       if (private->mdev && cp_is_finished)
                private->state = VFIO_CCW_STATE_IDLE;
 
        if (private->io_trigger)
index 23e61aa..e435a9c 100644 (file)
@@ -318,6 +318,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
        }
 
 err_out:
+       private->state = VFIO_CCW_STATE_IDLE;
        trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid,
                                      io_region->ret_code, errstr);
 }
index 491a64c..c57d2a7 100644 (file)
@@ -279,8 +279,6 @@ static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
        }
 
        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
-       if (region->ret_code != 0)
-               private->state = VFIO_CCW_STATE_IDLE;
        ret = (region->ret_code != 0) ? region->ret_code : count;
 
 out_unlock:
index 924d55a..65182ad 100644 (file)
@@ -58,7 +58,6 @@
 #include "aicasm_symbol.h"
 #include "aicasm_insformat.h"
 
-int yylineno;
 char *yyfilename;
 char stock_prefix[] = "aic_";
 char *prefix = stock_prefix;
index 7bf7fd5..ed3bdd4 100644 (file)
@@ -108,7 +108,7 @@ struct macro_arg {
        regex_t arg_regex;
        char   *replacement_text;
 };
-STAILQ_HEAD(macro_arg_list, macro_arg) args;
+STAILQ_HEAD(macro_arg_list, macro_arg);
 
 struct macro_info {
        struct macro_arg_list args;
index a7515c3..53343a6 100644 (file)
@@ -3,6 +3,17 @@
  * $FreeBSD: src/sys/cam/scsi/scsi_message.h,v 1.2 2000/05/01 20:21:29 peter Exp $
  */
 
+/* Messages (1 byte) */                     /* I/T (M)andatory or (O)ptional */
+#define MSG_SAVEDATAPOINTER    0x02 /* O/O */
+#define MSG_RESTOREPOINTERS    0x03 /* O/O */
+#define MSG_DISCONNECT         0x04 /* O/O */
+#define MSG_MESSAGE_REJECT     0x07 /* M/M */
+#define MSG_NOOP               0x08 /* M/M */
+
+/* Messages (2 byte) */
+#define MSG_SIMPLE_Q_TAG       0x20 /* O/O */
+#define MSG_IGN_WIDE_RESIDUE   0x23 /* O/O */
+
 /* Identify message */              /* M/M */  
 #define MSG_IDENTIFYFLAG       0x80 
 #define MSG_IDENTIFY_DISCFLAG  0x40 
index 1a0dc18..ed300a2 100644 (file)
@@ -1220,6 +1220,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
                   was a result from the ABTS request rather than the CLEANUP
                   request */
                set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
+               rc = FAILED;
                goto done;
        }
 
index 499c770..e954083 100644 (file)
@@ -4811,14 +4811,14 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
 {
        int i;
 
-       free_irq(pci_irq_vector(pdev, 1), hisi_hba);
-       free_irq(pci_irq_vector(pdev, 2), hisi_hba);
-       free_irq(pci_irq_vector(pdev, 11), hisi_hba);
+       devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 1), hisi_hba);
+       devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 2), hisi_hba);
+       devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 11), hisi_hba);
        for (i = 0; i < hisi_hba->cq_nvecs; i++) {
                struct hisi_sas_cq *cq = &hisi_hba->cq[i];
                int nr = hisi_sas_intr_conv ? 16 : 16 + i;
 
-               free_irq(pci_irq_vector(pdev, nr), cq);
+               devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq);
        }
        pci_free_irq_vectors(pdev);
 }
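These vectors were requested with devm_request_irq(), so each has a devres record that frees the IRQ again when the device is torn down; releasing them early with plain free_irq() leaves that record behind and leads to a double free (typically a "Trying to free already-free IRQ" warning). Managed IRQs released by hand must go through the devres-aware helper, e.g.:

    ret = devm_request_irq(&pdev->dev, irq, handler, 0, "example", data);
    ...
    devm_free_irq(&pdev->dev, irq, data); /* also drops the devres record */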
index 697c09e..cd52664 100644 (file)
@@ -254,12 +254,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 
        device_enable_async_suspend(&shost->shost_dev);
 
+       get_device(&shost->shost_gendev);
        error = device_add(&shost->shost_dev);
        if (error)
                goto out_del_gendev;
 
-       get_device(&shost->shost_gendev);
-
        if (shost->transportt->host_size) {
                shost->shost_data = kzalloc(shost->transportt->host_size,
                                         GFP_KERNEL);
@@ -278,33 +277,36 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 
                if (!shost->work_q) {
                        error = -EINVAL;
-                       goto out_free_shost_data;
+                       goto out_del_dev;
                }
        }
 
        error = scsi_sysfs_add_host(shost);
        if (error)
-               goto out_destroy_host;
+               goto out_del_dev;
 
        scsi_proc_host_add(shost);
        scsi_autopm_put_host(shost);
        return error;
 
- out_destroy_host:
-       if (shost->work_q)
-               destroy_workqueue(shost->work_q);
- out_free_shost_data:
-       kfree(shost->shost_data);
+       /*
+        * Any host allocation in this function will be freed in
+        * scsi_host_dev_release().
+        */
  out_del_dev:
        device_del(&shost->shost_dev);
  out_del_gendev:
+       /*
+        * Host state is SHOST_RUNNING so we have to explicitly release
+        * ->shost_dev.
+        */
+       put_device(&shost->shost_dev);
        device_del(&shost->shost_gendev);
  out_disable_runtime_pm:
        device_disable_async_suspend(&shost->shost_gendev);
        pm_runtime_disable(&shost->shost_gendev);
        pm_runtime_set_suspended(&shost->shost_gendev);
        pm_runtime_put_noidle(&shost->shost_gendev);
-       scsi_mq_destroy_tags(shost);
  fail:
        return error;
 }
@@ -345,7 +347,7 @@ static void scsi_host_dev_release(struct device *dev)
 
        ida_simple_remove(&host_index_ida, shost->host_no);
 
-       if (parent)
+       if (shost->shost_state != SHOST_CREATED)
                put_device(parent);
        kfree(shost);
 }
@@ -388,8 +390,10 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
        mutex_init(&shost->scan_mutex);
 
        index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
-       if (index < 0)
-               goto fail_kfree;
+       if (index < 0) {
+               kfree(shost);
+               return NULL;
+       }
        shost->host_no = index;
 
        shost->dma_channel = 0xff;
@@ -481,7 +485,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
                shost_printk(KERN_WARNING, shost,
                        "error handler thread failed to spawn, error = %ld\n",
                        PTR_ERR(shost->ehandler));
-               goto fail_index_remove;
+               goto fail;
        }
 
        shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
@@ -490,17 +494,18 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
        if (!shost->tmf_work_q) {
                shost_printk(KERN_WARNING, shost,
                             "failed to create tmf workq\n");
-               goto fail_kthread;
+               goto fail;
        }
        scsi_proc_hostdir_add(shost->hostt);
        return shost;
+ fail:
+       /*
+        * Host state is still SHOST_CREATED and that is enough to release
+        * ->shost_gendev. scsi_host_dev_release() will free
+        * dev_name(&shost->shost_dev).
+        */
+       put_device(&shost->shost_gendev);
 
- fail_kthread:
-       kthread_stop(shost->ehandler);
- fail_index_remove:
-       ida_simple_remove(&host_index_ida, shost->host_no);
- fail_kfree:
-       kfree(shost);
        return NULL;
 }
 EXPORT_SYMBOL(scsi_host_alloc);
index 19cf418..e3d03d7 100644 (file)
@@ -25,7 +25,7 @@ static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy
 
 static void sas_resume_port(struct asd_sas_phy *phy)
 {
-       struct domain_device *dev;
+       struct domain_device *dev, *n;
        struct asd_sas_port *port = phy->port;
        struct sas_ha_struct *sas_ha = phy->ha;
        struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt);
@@ -44,7 +44,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
         * 1/ presume every device came back
         * 2/ force the next revalidation to check all expander phys
         */
-       list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+       list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
                int i, rc;
 
                rc = sas_notify_lldd_dev_found(dev);
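list_for_each_entry_safe() caches the next node ('n') before the body runs, so the current entry may be unlinked mid-walk; the plain iterator would chase a pointer inside a removed entry. The loop body continues past this hunk and can unregister a device from port->dev_list when sas_notify_lldd_dev_found() fails, which is what forced the change. Generic shape (the unregister call stands in for the error path outside this hunk):

    struct domain_device *dev, *n;

    list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
            if (sas_notify_lldd_dev_found(dev))
                    sas_unregister_dev(port, dev); /* unlink is now safe */
    }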
index 573c859..fc3682f 100644 (file)
@@ -20589,10 +20589,8 @@ lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        abtswqe = &abtsiocb->wqe;
        memset(abtswqe, 0, sizeof(*abtswqe));
 
-       if (lpfc_is_link_up(phba))
+       if (!lpfc_is_link_up(phba))
                bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
-       else
-               bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 0);
        bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
        abtswqe->abort_cmd.rsrvd5 = 0;
        abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
index 7562311..b92570a 100644 (file)
@@ -1827,22 +1827,20 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
                fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
                QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
                           "WWPN (0x%s) already exists.\n", buf);
-               goto err1;
+               return rc;
        }
 
        if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
                QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
                           "because link is not up.\n");
-               rc = -EIO;
-               goto err1;
+               return -EIO;
        }
 
        vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
        if (!vn_port) {
                QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
                           "for vport.\n");
-               rc = -ENOMEM;
-               goto err1;
+               return -ENOMEM;
        }
 
        fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
@@ -1866,7 +1864,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
        if (rc) {
                QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
                    "for lport stats.\n");
-               goto err2;
+               goto err;
        }
 
        fc_set_wwnn(vn_port, vport->node_name);
@@ -1884,7 +1882,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
        if (rc) {
                QEDF_WARN(&base_qedf->dbg_ctx,
                          "Error adding Scsi_Host rc=0x%x.\n", rc);
-               goto err2;
+               goto err;
        }
 
        /* Set default dev_loss_tmo based on module parameter */
@@ -1925,9 +1923,10 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
        vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
        vport_qedf->dbg_ctx.pdev = base_qedf->pdev;
 
-err2:
+       return 0;
+
+err:
        scsi_host_put(vn_port->host);
-err1:
        return rc;
 }
 
@@ -1968,8 +1967,7 @@ static int qedf_vport_destroy(struct fc_vport *vport)
        fc_lport_free_stats(vn_port);
 
        /* Release Scsi_Host */
-       if (vn_port->host)
-               scsi_host_put(vn_port->host);
+       scsi_host_put(vn_port->host);
 
 out:
        return 0;
index b2008fb..12a6848 100644 (file)
@@ -1563,10 +1563,12 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
                return;
        }
 
+       mutex_lock(&tgt->ha->optrom_mutex);
        mutex_lock(&vha->vha_tgt.tgt_mutex);
        tgt->tgt_stop = 0;
        tgt->tgt_stopped = 1;
        mutex_unlock(&vha->vha_tgt.tgt_mutex);
+       mutex_unlock(&tgt->ha->optrom_mutex);
 
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
            tgt);
index d92cec1..d33355a 100644 (file)
@@ -184,6 +184,7 @@ static struct {
        {"HP", "C3323-300", "4269", BLIST_NOTQ},
        {"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
        {"HP", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
+       {"HPE", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES},
        {"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN},
        {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
        {"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
index aee3cfc..0a84ec9 100644 (file)
@@ -603,11 +603,23 @@ static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
 
        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
        if (!ret) {
-               if (ver >= UFS_UNIPRO_VER_1_8)
+               if (ver >= UFS_UNIPRO_VER_1_8) {
                        host->hw_ver.major = 3;
+                       /*
+                        * Fix HCI version for some platforms with
+                        * incorrect version
+                        */
+                       if (hba->ufs_version < ufshci_version(3, 0))
+                               hba->ufs_version = ufshci_version(3, 0);
+               }
        }
 }
 
+static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
+{
+       return hba->ufs_version;
+}
+
 /**
  * ufs_mtk_init - find other essential mmio bases
  * @hba: host controller instance
@@ -1048,6 +1060,7 @@ static void ufs_mtk_event_notify(struct ufs_hba *hba,
 static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
        .name                = "mediatek.ufshci",
        .init                = ufs_mtk_init,
+       .get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
        .setup_clocks        = ufs_mtk_setup_clocks,
        .hce_enable_notify   = ufs_mtk_hce_enable_notify,
        .link_startup_notify = ufs_mtk_link_startup_notify,
index 8a79605..b9969fc 100644 (file)
@@ -585,7 +585,13 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
                case BTSTAT_SUCCESS:
                case BTSTAT_LINKED_COMMAND_COMPLETED:
                case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
-                       /* If everything went fine, let's move on..  */
+                       /*
+                        * Commands like INQUIRY may transfer less data than
+                        * requested by the initiator via bufflen. Set residual
+                        * count to make upper layer aware of the actual amount
+                        * of data returned.
+                        */
+                       scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
                        cmd->result = (DID_OK << 16);
                        break;
 
index e195747..6dd1902 100644 (file)
@@ -626,10 +626,8 @@ static int meson_msr_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(base)) {
-               dev_err(&pdev->dev, "io resource mapping failed\n");
+       if (IS_ERR(base))
                return PTR_ERR(base);
-       }
 
        priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
                                             &meson_clk_msr_regmap_config);
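devm_ioremap_resource() already prints a descriptive error naming the failed resource before handing back an ERR_PTR, so the extra dev_err() was duplicate log noise. The idiomatic caller is just:

    base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(base))
            return PTR_ERR(base);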
index 2827085..0ef79d6 100644 (file)
@@ -1150,8 +1150,16 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
 
        ret = of_property_read_u8_array(np, "qcom,ports-block-pack-mode",
                                        bp_mode, nports);
-       if (ret)
-               return ret;
+       if (ret) {
+               u32 version;
+
+               ctrl->reg_read(ctrl, SWRM_COMP_HW_VERSION, &version);
+
+               if (version <= 0x01030000)
+                       memset(bp_mode, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
+               else
+                       return ret;
+       }
 
        memset(hstart, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
        of_property_read_u8_array(np, "qcom,ports-hstart", hstart, nports);
index 8965fe6..fe40626 100644 (file)
@@ -68,7 +68,7 @@
 #define BCM2835_SPI_FIFO_SIZE          64
 #define BCM2835_SPI_FIFO_SIZE_3_4      48
 #define BCM2835_SPI_DMA_MIN_LENGTH     96
-#define BCM2835_SPI_NUM_CS             4       /* raise as necessary */
+#define BCM2835_SPI_NUM_CS             24  /* raise as necessary */
 #define BCM2835_SPI_MODE_BITS  (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
                                | SPI_NO_CS | SPI_3WIRE)
 
@@ -1195,6 +1195,12 @@ static int bcm2835_spi_setup(struct spi_device *spi)
        struct gpio_chip *chip;
        u32 cs;
 
+       if (spi->chip_select >= BCM2835_SPI_NUM_CS) {
+               dev_err(&spi->dev, "only %d chip-selects supported\n",
+                       BCM2835_SPI_NUM_CS - 1);
+               return -EINVAL;
+       }
+
        /*
         * Precalculate SPI slave's CS register value for ->prepare_message():
         * The driver always uses software-controlled GPIO chip select, hence
@@ -1288,7 +1294,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
        ctlr->use_gpio_descriptors = true;
        ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
        ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
-       ctlr->num_chipselect = BCM2835_SPI_NUM_CS;
+       ctlr->num_chipselect = 3;
        ctlr->setup = bcm2835_spi_setup;
        ctlr->transfer_one = bcm2835_spi_transfer_one;
        ctlr->handle_err = bcm2835_spi_handle_err;
index 6a6af85..27d0087 100644 (file)
@@ -184,6 +184,8 @@ int spi_bitbang_setup(struct spi_device *spi)
 {
        struct spi_bitbang_cs   *cs = spi->controller_state;
        struct spi_bitbang      *bitbang;
+       bool                    initial_setup = false;
+       int                     retval;
 
        bitbang = spi_master_get_devdata(spi->master);
 
@@ -192,22 +194,30 @@ int spi_bitbang_setup(struct spi_device *spi)
                if (!cs)
                        return -ENOMEM;
                spi->controller_state = cs;
+               initial_setup = true;
        }
 
        /* per-word shift register access, in hardware or bitbanging */
        cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)];
-       if (!cs->txrx_word)
-               return -EINVAL;
+       if (!cs->txrx_word) {
+               retval = -EINVAL;
+               goto err_free;
+       }
 
        if (bitbang->setup_transfer) {
-               int retval = bitbang->setup_transfer(spi, NULL);
+               retval = bitbang->setup_transfer(spi, NULL);
                if (retval < 0)
-                       return retval;
+                       goto err_free;
        }
 
        dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
 
        return 0;
+
+err_free:
+       if (initial_setup)
+               kfree(cs);
+       return retval;
 }
 EXPORT_SYMBOL_GPL(spi_bitbang_setup);
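This is the first of several SPI fixes in this pull with the same shape (spi_fsl, omap_uwire, omap2_mcspi and the pxa2xx setup path below): the SPI core may invoke a device's setup() hook repeatedly, so an error path may only free controller_state when the failing call is the one that allocated it; freeing unconditionally would destroy state owned by an earlier successful setup. The shared pattern, reduced to a sketch with an illustrative do_setup() helper:

    bool initial_setup = false;

    if (!spi->controller_state) {
            cs = kzalloc(sizeof(*cs), GFP_KERNEL);
            if (!cs)
                    return -ENOMEM;
            spi->controller_state = cs;
            initial_setup = true;
    }

    retval = do_setup(spi);
    if (retval && initial_setup)
            kfree(cs); /* undo only this call's own allocation */
    return retval;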
 
index d0e5aa1..bdf94cc 100644 (file)
@@ -440,6 +440,7 @@ static int fsl_spi_setup(struct spi_device *spi)
 {
        struct mpc8xxx_spi *mpc8xxx_spi;
        struct fsl_spi_reg __iomem *reg_base;
+       bool initial_setup = false;
        int retval;
        u32 hw_mode;
        struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
@@ -452,6 +453,7 @@ static int fsl_spi_setup(struct spi_device *spi)
                if (!cs)
                        return -ENOMEM;
                spi_set_ctldata(spi, cs);
+               initial_setup = true;
        }
        mpc8xxx_spi = spi_master_get_devdata(spi->master);
 
@@ -475,6 +477,8 @@ static int fsl_spi_setup(struct spi_device *spi)
        retval = fsl_spi_setup_transfer(spi, NULL);
        if (retval < 0) {
                cs->hw_mode = hw_mode; /* Restore settings */
+               if (initial_setup)
+                       kfree(cs);
                return retval;
        }
 
index 71402f7..df28c66 100644 (file)
@@ -424,15 +424,22 @@ done:
 static int uwire_setup(struct spi_device *spi)
 {
        struct uwire_state *ust = spi->controller_state;
+       bool initial_setup = false;
+       int status;
 
        if (ust == NULL) {
                ust = kzalloc(sizeof(*ust), GFP_KERNEL);
                if (ust == NULL)
                        return -ENOMEM;
                spi->controller_state = ust;
+               initial_setup = true;
        }
 
-       return uwire_setup_transfer(spi, NULL);
+       status = uwire_setup_transfer(spi, NULL);
+       if (status && initial_setup)
+               kfree(ust);
+
+       return status;
 }
 
 static void uwire_cleanup(struct spi_device *spi)
index 999c227..ede7f05 100644 (file)
@@ -1032,8 +1032,22 @@ static void omap2_mcspi_release_dma(struct spi_master *master)
        }
 }
 
+static void omap2_mcspi_cleanup(struct spi_device *spi)
+{
+       struct omap2_mcspi_cs   *cs;
+
+       if (spi->controller_state) {
+               /* Unlink controller state from context save list */
+               cs = spi->controller_state;
+               list_del(&cs->node);
+
+               kfree(cs);
+       }
+}
+
 static int omap2_mcspi_setup(struct spi_device *spi)
 {
+       bool                    initial_setup = false;
        int                     ret;
        struct omap2_mcspi      *mcspi = spi_master_get_devdata(spi->master);
        struct omap2_mcspi_regs *ctx = &mcspi->ctx;
@@ -1051,35 +1065,28 @@ static int omap2_mcspi_setup(struct spi_device *spi)
                spi->controller_state = cs;
                /* Link this to context save list */
                list_add_tail(&cs->node, &ctx->cs);
+               initial_setup = true;
        }
 
        ret = pm_runtime_get_sync(mcspi->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(mcspi->dev);
+               if (initial_setup)
+                       omap2_mcspi_cleanup(spi);
 
                return ret;
        }
 
        ret = omap2_mcspi_setup_transfer(spi, NULL);
+       if (ret && initial_setup)
+               omap2_mcspi_cleanup(spi);
+
        pm_runtime_mark_last_busy(mcspi->dev);
        pm_runtime_put_autosuspend(mcspi->dev);
 
        return ret;
 }
 
-static void omap2_mcspi_cleanup(struct spi_device *spi)
-{
-       struct omap2_mcspi_cs   *cs;
-
-       if (spi->controller_state) {
-               /* Unlink controller state from context save list */
-               cs = spi->controller_state;
-               list_del(&cs->node);
-
-               kfree(cs);
-       }
-}
-
 static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
 {
        struct omap2_mcspi *mcspi = data;
index 5e59ba0..8ee0cc0 100644 (file)
@@ -1254,6 +1254,8 @@ static int setup_cs(struct spi_device *spi, struct chip_data *chip,
                chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;
 
                err = gpiod_direction_output(gpiod, !chip->gpio_cs_inverted);
+               if (err)
+                       gpiod_put(chip->gpiod_cs);
        }
 
        return err;
@@ -1267,6 +1269,7 @@ static int setup(struct spi_device *spi)
        struct driver_data *drv_data =
                spi_controller_get_devdata(spi->controller);
        uint tx_thres, tx_hi_thres, rx_thres;
+       int err;
 
        switch (drv_data->ssp_type) {
        case QUARK_X1000_SSP:
@@ -1413,7 +1416,11 @@ static int setup(struct spi_device *spi)
        if (drv_data->ssp_type == CE4100_SSP)
                return 0;
 
-       return setup_cs(spi, chip, chip_info);
+       err = setup_cs(spi, chip, chip_info);
+       if (err)
+               kfree(chip);
+
+       return err;
 }
 
 static void cleanup(struct spi_device *spi)
index 7e640cc..594f641 100644 (file)
@@ -294,7 +294,7 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
        int err = 0;
 
        if (!op->data.nbytes)
-               return stm32_qspi_wait_nobusy(qspi);
+               goto wait_nobusy;
 
        if (readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF)
                goto out;
@@ -315,6 +315,9 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
 out:
        /* clear flags */
        writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
+wait_nobusy:
+       if (!err)
+               err = stm32_qspi_wait_nobusy(qspi);
 
        return err;
 }
index 5a3d81c..9262c64 100644 (file)
@@ -678,14 +678,14 @@ static int zynq_qspi_probe(struct platform_device *pdev)
        xqspi->irq = platform_get_irq(pdev, 0);
        if (xqspi->irq <= 0) {
                ret = -ENXIO;
-               goto remove_master;
+               goto clk_dis_all;
        }
        ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
                               0, pdev->name, xqspi);
        if (ret != 0) {
                ret = -ENXIO;
                dev_err(&pdev->dev, "request_irq failed\n");
-               goto remove_master;
+               goto clk_dis_all;
        }
 
        ret = of_property_read_u32(np, "num-cs",
@@ -693,8 +693,9 @@ static int zynq_qspi_probe(struct platform_device *pdev)
        if (ret < 0) {
                ctlr->num_chipselect = 1;
        } else if (num_cs > ZYNQ_QSPI_MAX_NUM_CS) {
+               ret = -EINVAL;
                dev_err(&pdev->dev, "only 2 chip selects are available\n");
-               goto remove_master;
+               goto clk_dis_all;
        } else {
                ctlr->num_chipselect = num_cs;
        }
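All three redirected error paths sit after the controller clocks have been enabled, so jumping to remove_master leaked enabled clocks; clk_dis_all is the label that unwinds them first (the over-limit num-cs branch also gains a real -EINVAL instead of falling through with ret == 0). This is the usual probe rule of unwinding in reverse order of acquisition, roughly:

    ret = clk_prepare_enable(clk);
    if (ret)
            goto remove_master;    /* clock never came on */

    ret = devm_request_irq(...);
    if (ret)
            goto clk_dis_all;      /* clock is on: disable it first */
    ...
    clk_dis_all:
            clk_disable_unprepare(clk);
    remove_master:
            spi_controller_put(ctlr);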
index 741147a..ecc5c9d 100644 (file)
@@ -2064,7 +2064,7 @@ static int _nbu2ss_nuke(struct nbu2ss_udc *udc,
                        struct nbu2ss_ep *ep,
                        int status)
 {
-       struct nbu2ss_req *req;
+       struct nbu2ss_req *req, *n;
 
        /* Endpoint Disable */
        _nbu2ss_epn_exit(udc, ep);
@@ -2076,7 +2076,7 @@ static int _nbu2ss_nuke(struct nbu2ss_udc *udc,
                return 0;
 
        /* called with irqs blocked */
-       list_for_each_entry(req, &ep->queue, queue) {
+       list_for_each_entry_safe(req, n, &ep->queue, queue) {
                _nbu2ss_ep_done(ep, req, status);
        }
 
index dfd71e9..eab534d 100644 (file)
@@ -700,7 +700,6 @@ static int ad7746_probe(struct i2c_client *client,
                indio_dev->num_channels = ARRAY_SIZE(ad7746_channels);
        else
                indio_dev->num_channels =  ARRAY_SIZE(ad7746_channels) - 2;
-       indio_dev->num_channels = ARRAY_SIZE(ad7746_channels);
        indio_dev->modes = INDIO_DIRECT_MODE;
 
        if (pdata) {
index 33e28cc..b5229bc 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0+
 /*
- *  Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
  *  GDMA4740 DMAC support
  */
 
@@ -914,6 +913,5 @@ static struct platform_driver gdma_dma_driver = {
 };
 module_platform_driver(gdma_dma_driver);
 
-MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
 MODULE_DESCRIPTION("Ralink/MTK DMA driver");
 MODULE_LICENSE("GPL v2");
index a6d731e..4378592 100644 (file)
@@ -2091,7 +2091,7 @@ void rtw_cfg80211_indicate_sta_assoc(struct adapter *padapter, u8 *pmgmt_frame,
        struct net_device *ndev = padapter->pnetdev;
 
        {
-               struct station_info sinfo;
+               struct station_info sinfo = {};
                u8 ie_offset;
                if (GetFrameSubType(pmgmt_frame) == WIFI_ASSOCREQ)
                        ie_offset = _ASOCREQ_IE_OFFSET_;
@@ -2284,7 +2284,7 @@ static int rtw_cfg80211_add_monitor_if(struct adapter *padapter, char *name, str
        mon_wdev->iftype = NL80211_IFTYPE_MONITOR;
        mon_ndev->ieee80211_ptr = mon_wdev;
 
-       ret = register_netdevice(mon_ndev);
+       ret = cfg80211_register_netdevice(mon_ndev);
        if (ret) {
                goto out;
        }
@@ -2360,7 +2360,7 @@ static int cfg80211_rtw_del_virtual_intf(struct wiphy *wiphy,
        adapter = rtw_netdev_priv(ndev);
        pwdev_priv = adapter_wdev_data(adapter);
 
-       unregister_netdevice(ndev);
+       cfg80211_unregister_netdevice(ndev);
 
        if (ndev == pwdev_priv->pmon_ndev) {
                pwdev_priv->pmon_ndev = NULL;
index d6fdd1c..a526f96 100644 (file)
@@ -204,11 +204,11 @@ static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
        struct iblock_dev_plug *ib_dev_plug;
 
        /*
-        * Each se_device has a per cpu work this can be run from. Wwe
+        * Each se_device has a per cpu work this can be run from. We
         * shouldn't have multiple threads on the same cpu calling this
         * at the same time.
         */
-       ib_dev_plug = &ib_dev->ibd_plug[smp_processor_id()];
+       ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()];
        if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
                return NULL;
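smp_processor_id() warns under CONFIG_DEBUG_PREEMPT when called from preemptible context, since the returned id can be stale the moment it is read. This path runs preemptible, and a stale id is acceptable: the per-cpu plug is an optimization, and the IBD_PLUGF_PLUGGED test_and_set_bit() already serializes two tasks that land on the same entry. The raw variant states that intent:

    preempt_disable();
    cpu = smp_processor_id();     /* stable only inside this section */
    preempt_enable();

    cpu = raw_smp_processor_id(); /* may be stale; caller tolerates it */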
 
index 8fbfe75..7e35edd 100644 (file)
@@ -1416,7 +1416,7 @@ void __target_init_cmd(
        cmd->orig_fe_lun = unpacked_lun;
 
        if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
-               cmd->cpuid = smp_processor_id();
+               cmd->cpuid = raw_smp_processor_id();
 
        cmd->state_active = false;
 }
@@ -3121,9 +3121,7 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
        __releases(&cmd->t_state_lock)
        __acquires(&cmd->t_state_lock)
 {
-
-       assert_spin_locked(&cmd->t_state_lock);
-       WARN_ON_ONCE(!irqs_disabled());
+       lockdep_assert_held(&cmd->t_state_lock);
 
        if (fabric_stop)
                cmd->transport_state |= CMD_T_FABRIC_STOP;
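lockdep_assert_held() improves on the removed pair twice over: it checks that the current context holds the lock (assert_spin_locked() only checks that somebody does), and it compiles to nothing without CONFIG_LOCKDEP, whereas assert_spin_locked() always executes a real test. The separate irqs_disabled() warning is dropped because callers take t_state_lock with spin_lock_irqsave() and lockdep independently validates the lock's irq-safe usage:

    lockdep_assert_held(&cmd->t_state_lock); /* no-op unless lockdep is on */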
index 198d25a..4bba10e 100644 (file)
@@ -516,8 +516,10 @@ static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
        dpi = dbi * udev->data_pages_per_blk;
        /* Count the number of already allocated pages */
        xas_set(&xas, dpi);
+       rcu_read_lock();
        for (cnt = 0; xas_next(&xas) && cnt < page_cnt;)
                cnt++;
+       rcu_read_unlock();
 
        for (i = cnt; i < page_cnt; i++) {
                /* try to get new page from the mm */
@@ -699,11 +701,10 @@ static inline void tcmu_copy_data(struct tcmu_dev *udev,
                                  struct scatterlist *sg, unsigned int sg_nents,
                                  struct iovec **iov, size_t data_len)
 {
-       XA_STATE(xas, &udev->data_pages, 0);
        /* start value of dbi + 1 must not be a valid dbi */
        int dbi = -2;
        size_t page_remaining, cp_len;
-       int page_cnt, page_inx;
+       int page_cnt, page_inx, dpi;
        struct sg_mapping_iter sg_iter;
        unsigned int sg_flags;
        struct page *page;
@@ -726,9 +727,10 @@ static inline void tcmu_copy_data(struct tcmu_dev *udev,
                if (page_cnt > udev->data_pages_per_blk)
                        page_cnt = udev->data_pages_per_blk;
 
-               xas_set(&xas, dbi * udev->data_pages_per_blk);
-               for (page_inx = 0; page_inx < page_cnt && data_len; page_inx++) {
-                       page = xas_next(&xas);
+               dpi = dbi * udev->data_pages_per_blk;
+               for (page_inx = 0; page_inx < page_cnt && data_len;
+                    page_inx++, dpi++) {
+                       page = xa_load(&udev->data_pages, dpi);
 
                        if (direction == TCMU_DATA_AREA_TO_SG)
                                flush_dcache_page(page);
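Two complementary XArray fixes in this file: the advanced xas_* API leaves locking to the caller, so the walk in tcmu_get_empty_block() gains an explicit RCU read-side section, while tcmu_copy_data() sidesteps the rule by using plain xa_load(), which takes the RCU read lock internally for each lookup:

    /* advanced API: caller provides the locking */
    rcu_read_lock();
    for (cnt = 0; xas_next(&xas) && cnt < page_cnt;)
            cnt++;
    rcu_read_unlock();

    /* normal API: self-locking, one load at a time */
    page = xa_load(&udev->data_pages, dpi);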
index 6132cc8..6e6eb83 100644 (file)
@@ -220,6 +220,7 @@ int optee_open_session(struct tee_context *ctx,
        struct optee_msg_arg *msg_arg;
        phys_addr_t msg_parg;
        struct optee_session *sess = NULL;
+       uuid_t client_uuid;
 
        /* +2 for the meta parameters added below */
        shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
@@ -240,10 +241,11 @@ int optee_open_session(struct tee_context *ctx,
        memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
        msg_arg->params[1].u.value.c = arg->clnt_login;
 
-       rc = tee_session_calc_client_uuid((uuid_t *)&msg_arg->params[1].u.value,
-                                         arg->clnt_login, arg->clnt_uuid);
+       rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
+                                         arg->clnt_uuid);
        if (rc)
                goto out;
+       export_uuid(msg_arg->params[1].u.octets, &client_uuid);
 
        rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
        if (rc)
index 81ff593..e3d72d0 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/types.h>
 
 /*
- * This file defines the OP-TEE message protocol used to communicate
+ * This file defines the OP-TEE message protocol (ABI) used to communicate
  * with an instance of OP-TEE running in secure world.
  *
  * This file is divided into two sections.
@@ -144,9 +144,10 @@ struct optee_msg_param_value {
  * @tmem:      parameter by temporary memory reference
  * @rmem:      parameter by registered memory reference
  * @value:     parameter by opaque value
+ * @octets:    parameter by octet string
  *
  * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
- * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value,
+ * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value or octets,
  * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and
  * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem,
  * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
@@ -157,6 +158,7 @@ struct optee_msg_param {
                struct optee_msg_param_tmem tmem;
                struct optee_msg_param_rmem rmem;
                struct optee_msg_param_value value;
+               u8 octets[24];
        } u;
 };
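export_uuid() (linux/uuid.h) copies the 16 UUID bytes into a byte array in their defined order, so the client UUID is now serialized through the new octets[] view of the parameter union rather than written through a cast of the u64-based value member; computing into a local uuid_t first also keeps a failed tee_session_calc_client_uuid() from leaving a partial UUID in the shared message buffer. Sketch:

    uuid_t client_uuid;

    rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
                                      arg->clnt_uuid);
    if (rc)
            goto out;
    export_uuid(msg_arg->params[1].u.octets, &client_uuid); /* 16 bytes */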
 
index d1248ba..62c0aa5 100644 (file)
@@ -237,6 +237,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
        if (ACPI_FAILURE(status))
                trip_cnt = 0;
        else {
+               int i;
+
                int34x_thermal_zone->aux_trips =
                        kcalloc(trip_cnt,
                                sizeof(*int34x_thermal_zone->aux_trips),
@@ -247,6 +249,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
                }
                trip_mask = BIT(trip_cnt) - 1;
                int34x_thermal_zone->aux_trip_nr = trip_cnt;
+               for (i = 0; i < trip_cnt; ++i)
+                       int34x_thermal_zone->aux_trips[i] = THERMAL_TEMP_INVALID;
        }
 
        trip_cnt = int340x_thermal_read_trips(int34x_thermal_zone);
index f8e8825..99abdc0 100644 (file)
@@ -621,6 +621,17 @@ bool x86_thermal_enabled(void)
        return atomic_read(&therm_throt_en);
 }
 
+void __init therm_lvt_init(void)
+{
+       /*
+        * This function is only called on the boot CPU. Save the initial
+        * thermal LVT value of the BSP and use it later to restore the
+        * thermal LVT entry that the BIOS programmed for the APs.
+        */
+       if (intel_thermal_supported(&boot_cpu_data))
+               lvtthmr_init = apic_read(APIC_LVTTHMR);
+}
+
 void intel_init_thermal(struct cpuinfo_x86 *c)
 {
        unsigned int cpu = smp_processor_id();
@@ -630,10 +641,6 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
        if (!intel_thermal_supported(c))
                return;
 
-       /* On the BSP? */
-       if (c == &boot_cpu_data)
-               lvtthmr_init = apic_read(APIC_LVTTHMR);
-
        /*
         * First check if it's enabled already, in which case there might
         * be some SMM goo which handles it, so we can't even put a handler
index 295742e..4d8edc6 100644 (file)
@@ -166,7 +166,7 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd,
        if (thres_reg_value)
                *temp = zonedev->tj_max - thres_reg_value * 1000;
        else
-               *temp = 0;
+               *temp = THERMAL_TEMP_INVALID;
        pr_debug("sys_get_trip_temp %d\n", *temp);
 
        return 0;
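Both thermal hunks apply the same idea: 0 is a legitimate reading (0 millidegrees), so an uninitialized or unreadable trip point needs a sentinel outside the measurable range. A small runnable sketch of the pattern, with TEMP_INVALID as an assumed stand-in for the kernel's THERMAL_TEMP_INVALID:

/*
 * Sentinel pattern: "unset" must be distinguishable from a real
 * 0 m°C reading.
 */
#include <limits.h>
#include <stdio.h>

#define TEMP_INVALID INT_MIN

static int trips[4];

int main(void)
{
        for (int i = 0; i < 4; i++)
                trips[i] = TEMP_INVALID;  /* was implicitly 0 before the fix */

        trips[1] = 0;                     /* a genuine 0 m°C reading */

        for (int i = 0; i < 4; i++)
                printf("trip %d: %s\n", i,
                       trips[i] == TEMP_INVALID ? "unset" : "valid");
        return 0;
}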
index b460b56..232fd0b 100644 (file)
@@ -441,7 +441,7 @@ static int adc_tm5_get_dt_channel_data(struct adc_tm5_chip *adc_tm,
 
        if (args.args_count != 1 || args.args[0] >= ADC5_MAX_CHANNEL) {
                dev_err(dev, "%s: invalid ADC channel number %d\n", name, chan);
-               return ret;
+               return -EINVAL;
        }
        channel->adc_channel = args.args[0];
 
index ebe7cb7..ea0603b 100644 (file)
@@ -770,7 +770,7 @@ static int ti_bandgap_tshut_init(struct ti_bandgap *bgp,
 }
 
 /**
- * ti_bandgap_alert_init() - setup and initialize talert handling
+ * ti_bandgap_talert_init() - setup and initialize talert handling
  * @bgp: pointer to struct ti_bandgap
  * @pdev: pointer to device struct platform_device
  *
index 7288aaf..5631319 100644 (file)
@@ -366,15 +366,15 @@ int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
                        void *buf, size_t size)
 {
        unsigned int retries = DMA_PORT_RETRIES;
-       unsigned int offset;
-
-       offset = address & 3;
-       address = address & ~3;
 
        do {
-               u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
+               unsigned int offset;
+               size_t nbytes;
                int ret;
 
+               offset = address & 3;
+               nbytes = min_t(size_t, size + offset, MAIL_DATA_DWORDS * 4);
+
                ret = dma_port_flash_read_block(dma, address, dma->buf,
                                                ALIGN(nbytes, 4));
                if (ret) {
@@ -386,6 +386,7 @@ int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
                        return ret;
                }
 
+               nbytes -= offset;
                memcpy(buf, dma->buf + offset, nbytes);
 
                size -= nbytes;
index 680bc73..671d72a 100644 (file)
@@ -68,15 +68,15 @@ static int usb4_do_read_data(u16 address, void *buf, size_t size,
        unsigned int retries = USB4_DATA_RETRIES;
        unsigned int offset;
 
-       offset = address & 3;
-       address = address & ~3;
-
        do {
-               size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4);
                unsigned int dwaddress, dwords;
                u8 data[USB4_DATA_DWORDS * 4];
+               size_t nbytes;
                int ret;
 
+               offset = address & 3;
+               nbytes = min_t(size_t, size + offset, USB4_DATA_DWORDS * 4);
+
                dwaddress = address / 4;
                dwords = ALIGN(nbytes, 4) / 4;
 
@@ -87,6 +87,7 @@ static int usb4_do_read_data(u16 address, void *buf, size_t size,
                        return ret;
                }
 
+               nbytes -= offset;
                memcpy(buf, data + offset, nbytes);
 
                size -= nbytes;
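This hunk and the dma_port one above fix the same windowing bug: the dword offset must be recomputed on every iteration and folded into the window size before the aligned read, then subtracted afterwards so only the caller's bytes are copied and counted. A compilable userspace model of the corrected loop (WINDOW, flash and read_block() are stand-ins for the mailbox constants and the hardware access):

/*
 * Fixed windowed read: each pass reads a dword-aligned window, then
 * copies only the requested bytes, so an unaligned start address no
 * longer corrupts the tail of the buffer.
 */
#include <stdio.h>
#include <string.h>

#define WINDOW 16       /* stands in for MAIL_DATA_DWORDS * 4 */

static const unsigned char flash[64] = "0123456789abcdefghijklmnopqrstuv";

static void read_block(unsigned int address, unsigned char *dst, size_t n)
{
        memcpy(dst, flash + (address & ~3u), n); /* device reads dword aligned */
}

static void flash_read(unsigned int address, unsigned char *buf, size_t size)
{
        while (size) {
                unsigned int offset = address & 3;
                size_t nbytes = size + offset;
                unsigned char tmp[WINDOW];

                if (nbytes > WINDOW)
                        nbytes = WINDOW;

                read_block(address, tmp, (nbytes + 3) & ~(size_t)3); /* ALIGN */

                nbytes -= offset;               /* drop the leading pad bytes */
                memcpy(buf, tmp + offset, nbytes);

                size -= nbytes;
                address += nbytes;
                buf += nbytes;
        }
}

int main(void)
{
        unsigned char out[8] = { 0 };

        flash_read(5, out, 7);          /* unaligned start, odd length */
        printf("%.7s\n", out);          /* expect "56789ab" */
        return 0;
}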
index 52bb212..6473361 100644 (file)
@@ -7,6 +7,7 @@
  *  Copyright (C) 2001 Russell King.
  */
 
+#include <linux/bits.h>
 #include <linux/serial_8250.h>
 #include <linux/serial_reg.h>
 #include <linux/dmaengine.h>
@@ -70,24 +71,25 @@ struct serial8250_config {
        unsigned int    flags;
 };
 
-#define UART_CAP_FIFO  (1 << 8)        /* UART has FIFO */
-#define UART_CAP_EFR   (1 << 9)        /* UART has EFR */
-#define UART_CAP_SLEEP (1 << 10)       /* UART has IER sleep */
-#define UART_CAP_AFE   (1 << 11)       /* MCR-based hw flow control */
-#define UART_CAP_UUE   (1 << 12)       /* UART needs IER bit 6 set (Xscale) */
-#define UART_CAP_RTOIE (1 << 13)       /* UART needs IER bit 4 set (Xscale, Tegra) */
-#define UART_CAP_HFIFO (1 << 14)       /* UART has a "hidden" FIFO */
-#define UART_CAP_RPM   (1 << 15)       /* Runtime PM is active while idle */
-#define UART_CAP_IRDA  (1 << 16)       /* UART supports IrDA line discipline */
-#define UART_CAP_MINI  (1 << 17)       /* Mini UART on BCM283X family lacks:
+#define UART_CAP_FIFO  BIT(8)  /* UART has FIFO */
+#define UART_CAP_EFR   BIT(9)  /* UART has EFR */
+#define UART_CAP_SLEEP BIT(10) /* UART has IER sleep */
+#define UART_CAP_AFE   BIT(11) /* MCR-based hw flow control */
+#define UART_CAP_UUE   BIT(12) /* UART needs IER bit 6 set (Xscale) */
+#define UART_CAP_RTOIE BIT(13) /* UART needs IER bit 4 set (Xscale, Tegra) */
+#define UART_CAP_HFIFO BIT(14) /* UART has a "hidden" FIFO */
+#define UART_CAP_RPM   BIT(15) /* Runtime PM is active while idle */
+#define UART_CAP_IRDA  BIT(16) /* UART supports IrDA line discipline */
+#define UART_CAP_MINI  BIT(17) /* Mini UART on BCM283X family lacks:
                                         * STOP PARITY EPAR SPAR WLEN5 WLEN6
                                         */
 
-#define UART_BUG_QUOT  (1 << 0)        /* UART has buggy quot LSB */
-#define UART_BUG_TXEN  (1 << 1)        /* UART has buggy TX IIR status */
-#define UART_BUG_NOMSR (1 << 2)        /* UART has buggy MSR status bits (Au1x00) */
-#define UART_BUG_THRE  (1 << 3)        /* UART has buggy THRE reassertion */
-#define UART_BUG_PARITY        (1 << 4)        /* UART mishandles parity if FIFO enabled */
+#define UART_BUG_QUOT  BIT(0)  /* UART has buggy quot LSB */
+#define UART_BUG_TXEN  BIT(1)  /* UART has buggy TX IIR status */
+#define UART_BUG_NOMSR BIT(2)  /* UART has buggy MSR status bits (Au1x00) */
+#define UART_BUG_THRE  BIT(3)  /* UART has buggy THRE reassertion */
+#define UART_BUG_PARITY        BIT(4)  /* UART mishandles parity if FIFO enabled */
+#define UART_BUG_TXRACE        BIT(5)  /* UART Tx fails to set remote DR */
 
 
 #ifdef CONFIG_SERIAL_8250_SHARE_IRQ
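The conversion is mechanical but not purely cosmetic: BIT(n) shifts an unsigned constant, so high bit positions stay well defined where (1 << 31) would overflow a signed int. A stand-in macro (mirroring the intent of <linux/bits.h>, not quoting it) shows the shape:

#include <stdio.h>

#define BIT(n) (1UL << (n))     /* unsigned shift, safe for bit 31 and up */

#define UART_CAP_FIFO BIT(8)
#define UART_CAP_EFR  BIT(9)

int main(void)
{
        unsigned long caps = UART_CAP_FIFO | UART_CAP_EFR;

        printf("caps = 0x%lx, BIT(31) = 0x%lx\n", caps, BIT(31));
        return 0;
}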
index 61550f2..d035d08 100644 (file)
@@ -437,6 +437,7 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
        port.port.status = UPSTAT_SYNC_FIFO;
        port.port.dev = &pdev->dev;
        port.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
+       port.bugs |= UART_BUG_TXRACE;
 
        rc = sysfs_create_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
        if (rc < 0)
index 9e204f9..a3a0154 100644 (file)
@@ -714,6 +714,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
        { "APMC0D08", 0},
        { "AMD0020", 0 },
        { "AMDI0020", 0 },
+       { "AMDI0022", 0 },
        { "BRCM2032", 0 },
        { "HISI0031", 0 },
        { },
index 2f49c58..bd4e9f6 100644 (file)
@@ -553,7 +553,11 @@ static void pci_xr17v35x_exit(struct pci_dev *pcidev)
 {
        struct exar8250 *priv = pci_get_drvdata(pcidev);
        struct uart_8250_port *port = serial8250_get_port(priv->line[0]);
-       struct platform_device *pdev = port->port.private_data;
+       struct platform_device *pdev;
+
+       pdev = port->port.private_data;
+       if (!pdev)
+               return;
 
        device_remove_software_node(&pdev->dev);
        platform_device_unregister(pdev);
index 689d822..780cc99 100644 (file)
@@ -56,6 +56,8 @@ struct serial_private {
        int                     line[];
 };
 
+#define PCI_DEVICE_ID_HPE_PCI_SERIAL   0x37e
+
 static const struct pci_device_id pci_use_msi[] = {
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
                         0xA000, 0x1000) },
@@ -63,6 +65,8 @@ static const struct pci_device_id pci_use_msi[] = {
                         0xA000, 0x1000) },
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9922,
                         0xA000, 0x1000) },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_HP_3PAR, PCI_DEVICE_ID_HPE_PCI_SERIAL,
+                        PCI_ANY_ID, PCI_ANY_ID) },
        { }
 };
 
@@ -1998,6 +2002,16 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
                .setup          = pci_hp_diva_setup,
        },
        /*
+        * HPE PCI serial device
+        */
+       {
+               .vendor         = PCI_VENDOR_ID_HP_3PAR,
+               .device         = PCI_DEVICE_ID_HPE_PCI_SERIAL,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .setup          = pci_hp_diva_setup,
+       },
+       /*
         * Intel
         */
        {
@@ -3944,21 +3958,26 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
        uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
        uart.port.uartclk = board->base_baud * 16;
 
-       if (pci_match_id(pci_use_msi, dev)) {
-               dev_dbg(&dev->dev, "Using MSI(-X) interrupts\n");
-               pci_set_master(dev);
-               rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES);
+       if (board->flags & FL_NOIRQ) {
+               uart.port.irq = 0;
        } else {
-               dev_dbg(&dev->dev, "Using legacy interrupts\n");
-               rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
-       }
-       if (rc < 0) {
-               kfree(priv);
-               priv = ERR_PTR(rc);
-               goto err_deinit;
+               if (pci_match_id(pci_use_msi, dev)) {
+                       dev_dbg(&dev->dev, "Using MSI(-X) interrupts\n");
+                       pci_set_master(dev);
+                       rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES);
+               } else {
+                       dev_dbg(&dev->dev, "Using legacy interrupts\n");
+                       rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
+               }
+               if (rc < 0) {
+                       kfree(priv);
+                       priv = ERR_PTR(rc);
+                       goto err_deinit;
+               }
+
+               uart.port.irq = pci_irq_vector(dev, 0);
        }
 
-       uart.port.irq = pci_irq_vector(dev, 0);
        uart.port.dev = &dev->dev;
 
        for (i = 0; i < nr_ports; i++) {
@@ -4973,6 +4992,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
        {       PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
                pbn_b2_1_115200 },
+       /* HPE PCI serial device */
+       {       PCI_VENDOR_ID_HP_3PAR, PCI_DEVICE_ID_HPE_PCI_SERIAL,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_b1_1_115200 },
 
        {       PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM2,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
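The restructured pciserial_init_ports() path means boards flagged FL_NOIRQ never allocate an interrupt vector and simply run with port.irq = 0 (polled mode); previously pci_irq_vector() was reached unconditionally. A schematic of the new control flow, with stand-in helpers rather than the real PCI API:

#include <stdio.h>

#define FL_NOIRQ 0x1

static int alloc_irq_vector(void) { return 42; /* pretend vector */ }

static int setup_irq(unsigned int board_flags)
{
        int irq;

        if (board_flags & FL_NOIRQ) {
                irq = 0;                /* polled mode, nothing allocated */
        } else {
                irq = alloc_irq_vector();
                if (irq < 0)
                        return irq;     /* only this branch can fail */
        }
        printf("port.irq = %d\n", irq);
        return 0;
}

int main(void)
{
        setup_irq(FL_NOIRQ);
        setup_irq(0);
        return 0;
}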
index d45dab1..fc5ab20 100644 (file)
@@ -1809,6 +1809,18 @@ void serial8250_tx_chars(struct uart_8250_port *up)
        count = up->tx_loadsz;
        do {
                serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+               if (up->bugs & UART_BUG_TXRACE) {
+                       /*
+                        * The Aspeed BMC virtual UARTs have a bug where data
+                        * may get stuck in the BMC's Tx FIFO from bursts of
+                        * writes on the APB interface.
+                        *
+                        * Delay back-to-back writes by a read cycle to avoid
+                        * stalling the VUART. Read a register that won't have
+                        * side-effects and discard the result.
+                        */
+                       serial_in(up, UART_SCR);
+               }
                xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
                port->icount.tx++;
                if (uart_circ_empty(xmit))
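The workaround relies on a classic MMIO property: a read from the device forces earlier posted writes to complete, so inserting a side-effect-free read (here the scratch register) paces back-to-back Tx writes. A sketch of the idea with illustrative pointers, not the real 8250 accessors:

#include <stdint.h>

static inline void uart_putc(volatile uint32_t *tx, volatile uint32_t *scr,
                             uint8_t c, int has_txrace_bug)
{
        *tx = c;                /* posted write into the Tx FIFO */
        if (has_txrace_bug)
                (void)*scr;     /* read cycle separates back-to-back writes */
}

int main(void)
{
        static volatile uint32_t fake_tx, fake_scr;

        uart_putc(&fake_tx, &fake_scr, 'A', 1);
        return 0;
}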
index d60abff..6689d8a 100644 (file)
@@ -195,7 +195,6 @@ struct rp2_card {
        void __iomem                    *bar0;
        void __iomem                    *bar1;
        spinlock_t                      card_lock;
-       struct completion               fw_loaded;
 };
 
 #define RP_ID(prod) PCI_VDEVICE(RP, (prod))
@@ -662,17 +661,10 @@ static void rp2_remove_ports(struct rp2_card *card)
        card->initialized_ports = 0;
 }
 
-static void rp2_fw_cb(const struct firmware *fw, void *context)
+static int rp2_load_firmware(struct rp2_card *card, const struct firmware *fw)
 {
-       struct rp2_card *card = context;
        resource_size_t phys_base;
-       int i, rc = -ENOENT;
-
-       if (!fw) {
-               dev_err(&card->pdev->dev, "cannot find '%s' firmware image\n",
-                       RP2_FW_NAME);
-               goto no_fw;
-       }
+       int i, rc = 0;
 
        phys_base = pci_resource_start(card->pdev, 1);
 
@@ -718,23 +710,13 @@ static void rp2_fw_cb(const struct firmware *fw, void *context)
                card->initialized_ports++;
        }
 
-       release_firmware(fw);
-no_fw:
-       /*
-        * rp2_fw_cb() is called from a workqueue long after rp2_probe()
-        * has already returned success.  So if something failed here,
-        * we'll just leave the now-dormant device in place until somebody
-        * unbinds it.
-        */
-       if (rc)
-               dev_warn(&card->pdev->dev, "driver initialization failed\n");
-
-       complete(&card->fw_loaded);
+       return rc;
 }
 
 static int rp2_probe(struct pci_dev *pdev,
                                   const struct pci_device_id *id)
 {
+       const struct firmware *fw;
        struct rp2_card *card;
        struct rp2_uart_port *ports;
        void __iomem * const *bars;
@@ -745,7 +727,6 @@ static int rp2_probe(struct pci_dev *pdev,
                return -ENOMEM;
        pci_set_drvdata(pdev, card);
        spin_lock_init(&card->card_lock);
-       init_completion(&card->fw_loaded);
 
        rc = pcim_enable_device(pdev);
        if (rc)
@@ -778,21 +759,23 @@ static int rp2_probe(struct pci_dev *pdev,
                return -ENOMEM;
        card->ports = ports;
 
-       rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt,
-                             IRQF_SHARED, DRV_NAME, card);
-       if (rc)
+       rc = request_firmware(&fw, RP2_FW_NAME, &pdev->dev);
+       if (rc < 0) {
+               dev_err(&pdev->dev, "cannot find '%s' firmware image\n",
+                       RP2_FW_NAME);
                return rc;
+       }
 
-       /*
-        * Only catastrophic errors (e.g. ENOMEM) are reported here.
-        * If the FW image is missing, we'll find out in rp2_fw_cb()
-        * and print an error message.
-        */
-       rc = request_firmware_nowait(THIS_MODULE, 1, RP2_FW_NAME, &pdev->dev,
-                                    GFP_KERNEL, card, rp2_fw_cb);
+       rc = rp2_load_firmware(card, fw);
+
+       release_firmware(fw);
+       if (rc < 0)
+               return rc;
+
+       rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt,
+                             IRQF_SHARED, DRV_NAME, card);
        if (rc)
                return rc;
-       dev_dbg(&pdev->dev, "waiting for firmware blob...\n");
 
        return 0;
 }
@@ -801,7 +784,6 @@ static void rp2_remove(struct pci_dev *pdev)
 {
        struct rp2_card *card = pci_get_drvdata(pdev);
 
-       wait_for_completion(&card->fw_loaded);
        rp2_remove_ports(card);
 }
 
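The rp2 rework trades the asynchronous request_firmware_nowait() callback for a synchronous load in probe: a missing blob now fails the probe cleanly instead of leaving a dormant device behind, and the IRQ is requested only after the hardware has been programmed. Schematically, with stand-in helpers:

#include <stdio.h>

static int request_fw(const char **fw) { *fw = "rp2.fw"; return 0; }
static void release_fw(const char *fw) { (void)fw; }
static int load_fw(const char *fw) { printf("loading %s\n", fw); return 0; }
static int request_irq_stub(void) { return 0; }

static int probe(void)
{
        const char *fw;
        int rc = request_fw(&fw);

        if (rc < 0)
                return rc;      /* fail probe, no dormant device */

        rc = load_fw(fw);
        release_fw(fw);
        if (rc < 0)
                return rc;

        return request_irq_stub(); /* only after the hardware is ready */
}

int main(void) { return probe(); }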
index bbae072..2220327 100644 (file)
@@ -338,7 +338,7 @@ static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
 
        do {
                lsr = tegra_uart_read(tup, UART_LSR);
-               if ((lsr | UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
+               if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR))
                        break;
                udelay(1);
        } while (--tmout);
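The Tegra fix is a one-character logic bug worth spelling out: OR-ing in UART_LSR_TEMT makes the expression non-zero for every possible LSR value, so the old loop could exit while the transmitter was still busy. A quick runnable check:

#include <stdio.h>

#define UART_LSR_TEMT 0x40
#define UART_LSR_DR   0x01

int main(void)
{
        unsigned int lsr = 0x00;  /* transmitter NOT empty, no data ready */

        printf("buggy: %d\n", (lsr | UART_LSR_TEMT) && !(lsr & UART_LSR_DR)); /* 1 */
        printf("fixed: %d\n", (lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR)); /* 0 */
        return 0;
}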
index 87f7127..18ff85a 100644 (file)
@@ -863,9 +863,11 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
                goto check_and_exit;
        }
 
-       retval = security_locked_down(LOCKDOWN_TIOCSSERIAL);
-       if (retval && (change_irq || change_port))
-               goto exit;
+       if (change_irq || change_port) {
+               retval = security_locked_down(LOCKDOWN_TIOCSSERIAL);
+               if (retval)
+                       goto exit;
+       }
 
        /*
         * Ask the low level driver to verify the settings.
index ef37fdf..4baf131 100644 (file)
@@ -1023,10 +1023,10 @@ static int scif_set_rtrg(struct uart_port *port, int rx_trig)
 {
        unsigned int bits;
 
+       if (rx_trig >= port->fifosize)
+               rx_trig = port->fifosize - 1;
        if (rx_trig < 1)
                rx_trig = 1;
-       if (rx_trig >= port->fifosize)
-               rx_trig = port->fifosize;
 
        /* HSCIF can be set to an arbitrary level. */
        if (sci_getreg(port, HSRTRGR)->size) {
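Reordering the clamps (and capping at fifosize - 1 rather than fifosize) guarantees the minimum bound is applied last, so rx_trig can never end up outside 1..fifosize-1 even for a one-byte FIFO. A compilable distillation:

#include <stdio.h>

static int clamp_rtrg(int rx_trig, int fifosize)
{
        if (rx_trig >= fifosize)
                rx_trig = fifosize - 1; /* upper bound first */
        if (rx_trig < 1)
                rx_trig = 1;            /* lower bound wins */
        return rx_trig;
}

int main(void)
{
        printf("%d\n", clamp_rtrg(5, 1));   /* 1, never 0 */
        printf("%d\n", clamp_rtrg(0, 16));  /* 1 */
        printf("%d\n", clamp_rtrg(99, 16)); /* 15 */
        return 0;
}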
index 9b1bd41..5281f8d 100644 (file)
@@ -2007,7 +2007,7 @@ static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
                else
                        mask = BIT(priv_ep->num);
 
-               if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
+               if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
                        cdns3_set_register_bit(&regs->tdl_from_trb, mask);
                        cdns3_set_register_bit(&regs->tdl_beh, mask);
                        cdns3_set_register_bit(&regs->tdl_beh2, mask);
@@ -2046,15 +2046,13 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
        case USB_ENDPOINT_XFER_INT:
                ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);
 
-               if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
-                   priv_dev->dev_ver > DEV_VER_V2)
+               if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
                        ep_cfg |= EP_CFG_TDL_CHK;
                break;
        case USB_ENDPOINT_XFER_BULK:
                ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);
 
-               if ((priv_dev->dev_ver == DEV_VER_V2  && !priv_ep->dir) ||
-                   priv_dev->dev_ver > DEV_VER_V2)
+               if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
                        ep_cfg |= EP_CFG_TDL_CHK;
                break;
        default:
@@ -3268,8 +3266,10 @@ static int __cdns3_gadget_init(struct cdns *cdns)
        pm_runtime_get_sync(cdns->dev);
 
        ret = cdns3_gadget_start(cdns);
-       if (ret)
+       if (ret) {
+               pm_runtime_put_sync(cdns->dev);
                return ret;
+       }
 
        /*
         * Because interrupt line can be shared with other components in
index 56707b6..c083985 100644 (file)
@@ -422,17 +422,17 @@ unmap:
 int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
 {
        struct cdnsp_device *pdev = pep->pdev;
-       int ret;
+       int ret_stop = 0;
+       int ret_rem;
 
        trace_cdnsp_request_dequeue(preq);
 
-       if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING) {
-               ret = cdnsp_cmd_stop_ep(pdev, pep);
-               if (ret)
-                       return ret;
-       }
+       if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING)
+               ret_stop = cdnsp_cmd_stop_ep(pdev, pep);
+
+       ret_rem = cdnsp_remove_request(pdev, preq, pep);
 
-       return cdnsp_remove_request(pdev, preq, pep);
+       return ret_rem ? ret_rem : ret_stop;
 }
 
 static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev)
index 5f0513c..6897274 100644 (file)
@@ -1517,13 +1517,14 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
 {
        struct cdnsp_device *pdev = (struct cdnsp_device *)data;
        union cdnsp_trb *event_ring_deq;
+       unsigned long flags;
        int counter = 0;
 
-       spin_lock(&pdev->lock);
+       spin_lock_irqsave(&pdev->lock, flags);
 
        if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
                cdnsp_died(pdev);
-               spin_unlock(&pdev->lock);
+               spin_unlock_irqrestore(&pdev->lock, flags);
                return IRQ_HANDLED;
        }
 
@@ -1539,7 +1540,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
 
        cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
 
-       spin_unlock(&pdev->lock);
+       spin_unlock_irqrestore(&pdev->lock, flags);
 
        return IRQ_HANDLED;
 }
index c16d900..393f216 100644 (file)
@@ -2061,6 +2061,7 @@ static int udc_start(struct ci_hdrc *ci)
        ci->gadget.name         = ci->platdata->name;
        ci->gadget.otg_caps     = otg_caps;
        ci->gadget.sg_supported = 1;
+       ci->gadget.irq          = ci->irq;
 
        if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA)
                ci->gadget.quirk_avoids_skb_reserve = 1;
index 4545b23..bac0f54 100644 (file)
@@ -686,6 +686,16 @@ static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
        int val;
        unsigned long flags;
 
+       /* Clear VDATSRCENB0 to disable VDP_SRC and IDM_SNK, as required by the BC 1.2 spec */
+       spin_lock_irqsave(&usbmisc->lock, flags);
+       val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+       val &= ~MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0;
+       writel(val, usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+       spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+       /* TVDMSRC_DIS */
+       msleep(20);
+
        /* VDM_SRC is connected to D- and IDP_SINK is connected to D+ */
        spin_lock_irqsave(&usbmisc->lock, flags);
        val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
@@ -695,7 +705,8 @@ static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
                                usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
        spin_unlock_irqrestore(&usbmisc->lock, flags);
 
-       usleep_range(1000, 2000);
+       /* TVDMSRC_ON */
+       msleep(40);
 
        /*
         * Per BC 1.2, check voltage of D+:
@@ -798,7 +809,8 @@ static int imx7d_charger_primary_detection(struct imx_usbmisc_data *data)
                                usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
        spin_unlock_irqrestore(&usbmisc->lock, flags);
 
-       usleep_range(1000, 2000);
+       /* TVDPSRC_ON */
+       msleep(40);
 
        /* Check if D- is less than VDAT_REF to determine an SDP per BC 1.2 */
        val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
index 5332363..2218941 100644 (file)
@@ -1218,7 +1218,12 @@ static int do_proc_bulk(struct usb_dev_state *ps,
        ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
        if (ret)
                return ret;
-       tbuf = kmalloc(len1, GFP_KERNEL);
+
+       /*
+        * len1 can be almost arbitrarily large.  Don't WARN if it's
+        * too big, just fail the request.
+        */
+       tbuf = kmalloc(len1, GFP_KERNEL | __GFP_NOWARN);
        if (!tbuf) {
                ret = -ENOMEM;
                goto done;
@@ -1696,7 +1701,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
        if (num_sgs) {
                as->urb->sg = kmalloc_array(num_sgs,
                                            sizeof(struct scatterlist),
-                                           GFP_KERNEL);
+                                           GFP_KERNEL | __GFP_NOWARN);
                if (!as->urb->sg) {
                        ret = -ENOMEM;
                        goto error;
@@ -1731,7 +1736,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
                                        (uurb_start - as->usbm->vm_start);
                } else {
                        as->urb->transfer_buffer = kmalloc(uurb->buffer_length,
-                                       GFP_KERNEL);
+                                       GFP_KERNEL | __GFP_NOWARN);
                        if (!as->urb->transfer_buffer) {
                                ret = -ENOMEM;
                                goto error;
index fc7d6cd..df8e69e 100644 (file)
@@ -41,6 +41,8 @@
 #define USB_VENDOR_GENESYS_LOGIC               0x05e3
 #define USB_VENDOR_SMSC                                0x0424
 #define USB_PRODUCT_USB5534B                   0x5534
+#define USB_VENDOR_CYPRESS                     0x04b4
+#define USB_PRODUCT_CY7C65632                  0x6570
 #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND       0x01
 #define HUB_QUIRK_DISABLE_AUTOSUSPEND          0x02
 
@@ -5698,6 +5700,11 @@ static const struct usb_device_id hub_id_table[] = {
       .bInterfaceClass = USB_CLASS_HUB,
       .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
     { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+                   | USB_DEVICE_ID_MATCH_PRODUCT,
+      .idVendor = USB_VENDOR_CYPRESS,
+      .idProduct = USB_PRODUCT_CY7C65632,
+      .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
+    { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
                        | USB_DEVICE_ID_MATCH_INT_CLASS,
       .idVendor = USB_VENDOR_GENESYS_LOGIC,
       .bInterfaceClass = USB_CLASS_HUB,
index b6e53d8..4ac397e 100644 (file)
@@ -1671,8 +1671,8 @@ static int dwc3_remove(struct platform_device *pdev)
 
        pm_runtime_get_sync(&pdev->dev);
 
-       dwc3_debugfs_exit(dwc);
        dwc3_core_exit_mode(dwc);
+       dwc3_debugfs_exit(dwc);
 
        dwc3_core_exit(dwc);
        dwc3_ulpi_exit(dwc);
@@ -1690,11 +1690,6 @@ static int dwc3_remove(struct platform_device *pdev)
        return 0;
 }
 
-static void dwc3_shutdown(struct platform_device *pdev)
-{
-       dwc3_remove(pdev);
-}
-
 #ifdef CONFIG_PM
 static int dwc3_core_init_for_resume(struct dwc3 *dwc)
 {
@@ -2012,7 +2007,6 @@ MODULE_DEVICE_TABLE(acpi, dwc3_acpi_match);
 static struct platform_driver dwc3_driver = {
        .probe          = dwc3_probe,
        .remove         = dwc3_remove,
-       .shutdown   = dwc3_shutdown,
        .driver         = {
                .name   = "dwc3",
                .of_match_table = of_match_ptr(of_dwc3_match),
index d0ac89c..d223c54 100644 (file)
@@ -413,9 +413,12 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
 
 
 #ifdef CONFIG_DEBUG_FS
+extern void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep);
 extern void dwc3_debugfs_init(struct dwc3 *d);
 extern void dwc3_debugfs_exit(struct dwc3 *d);
 #else
+static inline void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
+{  }
 static inline void dwc3_debugfs_init(struct dwc3 *d)
 {  }
 static inline void dwc3_debugfs_exit(struct dwc3 *d)
index 7146ee2..5dbbe53 100644 (file)
@@ -886,30 +886,14 @@ static void dwc3_debugfs_create_endpoint_files(struct dwc3_ep *dep,
        }
 }
 
-static void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep,
-               struct dentry *parent)
+void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
 {
        struct dentry           *dir;
 
-       dir = debugfs_create_dir(dep->name, parent);
+       dir = debugfs_create_dir(dep->name, dep->dwc->root);
        dwc3_debugfs_create_endpoint_files(dep, dir);
 }
 
-static void dwc3_debugfs_create_endpoint_dirs(struct dwc3 *dwc,
-               struct dentry *parent)
-{
-       int                     i;
-
-       for (i = 0; i < dwc->num_eps; i++) {
-               struct dwc3_ep  *dep = dwc->eps[i];
-
-               if (!dep)
-                       continue;
-
-               dwc3_debugfs_create_endpoint_dir(dep, parent);
-       }
-}
-
 void dwc3_debugfs_init(struct dwc3 *dwc)
 {
        struct dentry           *root;
@@ -940,7 +924,6 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
                                &dwc3_testmode_fops);
                debugfs_create_file("link_state", 0644, root, dwc,
                                    &dwc3_link_state_fops);
-               dwc3_debugfs_create_endpoint_dirs(dwc, root);
        }
 }
 
index bdf1f98..ffe301d 100644 (file)
@@ -651,7 +651,7 @@ static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
                return PTR_ERR(priv->usb_glue_regmap);
 
        /* Create a regmap for each USB2 PHY control register set */
-       for (i = 0; i < priv->usb2_ports; i++) {
+       for (i = 0; i < priv->drvdata->num_phys; i++) {
                struct regmap_config u2p_regmap_config = {
                        .reg_bits = 8,
                        .val_bits = 32,
@@ -659,6 +659,9 @@ static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
                        .max_register = U2P_R1,
                };
 
+               if (!strstr(priv->drvdata->phy_names[i], "usb2"))
+                       continue;
+
                u2p_regmap_config.name = devm_kasprintf(priv->dev, GFP_KERNEL,
                                                        "u2p-%d", i);
                if (!u2p_regmap_config.name)
@@ -772,13 +775,13 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
 
        ret = priv->drvdata->usb_init(priv);
        if (ret)
-               goto err_disable_clks;
+               goto err_disable_regulator;
 
        /* Init PHYs */
        for (i = 0 ; i < PHY_COUNT ; ++i) {
                ret = phy_init(priv->phys[i]);
                if (ret)
-                       goto err_disable_clks;
+                       goto err_disable_regulator;
        }
 
        /* Set PHY Power */
@@ -816,6 +819,10 @@ err_phys_exit:
        for (i = 0 ; i < PHY_COUNT ; ++i)
                phy_exit(priv->phys[i]);
 
+err_disable_regulator:
+       if (priv->vbus)
+               regulator_disable(priv->vbus);
+
 err_disable_clks:
        clk_bulk_disable_unprepare(priv->drvdata->num_clks,
                                   priv->drvdata->clks);
index 8b668ef..3cd2942 100644 (file)
@@ -292,6 +292,9 @@ static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
                epnum |= 1;
 
        dep = dwc->eps[epnum];
+       if (dep == NULL)
+               return NULL;
+
        if (dep->flags & DWC3_EP_ENABLED)
                return dep;
 
index 49ca5da..f14c2aa 100644 (file)
@@ -1244,6 +1244,7 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
                        req->start_sg = sg_next(s);
 
                req->num_queued_sgs++;
+               req->num_pending_sgs--;
 
                /*
                 * The number of pending SG entries may not correspond to the
@@ -1251,7 +1252,7 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
                 * don't include unused SG entries.
                 */
                if (length == 0) {
-                       req->num_pending_sgs -= req->request.num_mapped_sgs - req->num_queued_sgs;
+                       req->num_pending_sgs = 0;
                        break;
                }
 
@@ -2260,13 +2261,10 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
        }
 
        /*
-        * Synchronize any pending event handling before executing the controller
-        * halt routine.
+        * Synchronize and disable any further event handling while controller
+        * is being enabled/disabled.
         */
-       if (!is_on) {
-               dwc3_gadget_disable_irq(dwc);
-               synchronize_irq(dwc->irq_gadget);
-       }
+       disable_irq(dwc->irq_gadget);
 
        spin_lock_irqsave(&dwc->lock, flags);
 
@@ -2304,6 +2302,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 
        ret = dwc3_gadget_run_stop(dwc, is_on, false);
        spin_unlock_irqrestore(&dwc->lock, flags);
+       enable_irq(dwc->irq_gadget);
+
        pm_runtime_put(dwc->dev);
 
        return ret;
@@ -2753,6 +2753,8 @@ static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
        INIT_LIST_HEAD(&dep->started_list);
        INIT_LIST_HEAD(&dep->cancelled_list);
 
+       dwc3_debugfs_create_endpoint_dir(dep);
+
        return 0;
 }
 
@@ -2796,6 +2798,7 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
                        list_del(&dep->endpoint.ep_list);
                }
 
+               debugfs_remove_recursive(debugfs_lookup(dep->name, dwc->root));
                kfree(dep);
        }
 }
@@ -2873,15 +2876,15 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
        struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
        struct scatterlist *sg = req->sg;
        struct scatterlist *s;
-       unsigned int pending = req->num_pending_sgs;
+       unsigned int num_queued = req->num_queued_sgs;
        unsigned int i;
        int ret = 0;
 
-       for_each_sg(sg, s, pending, i) {
+       for_each_sg(sg, s, num_queued, i) {
                trb = &dep->trb_pool[dep->trb_dequeue];
 
                req->sg = sg_next(s);
-               req->num_pending_sgs--;
+               req->num_queued_sgs--;
 
                ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req,
                                trb, event, status, true);
@@ -2904,7 +2907,7 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
 
 static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
 {
-       return req->num_pending_sgs == 0;
+       return req->num_pending_sgs == 0 && req->num_queued_sgs == 0;
 }
 
 static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
@@ -2913,7 +2916,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
 {
        int ret;
 
-       if (req->num_pending_sgs)
+       if (req->request.num_mapped_sgs)
                ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
                                status);
        else
@@ -4045,6 +4048,7 @@ err5:
        dwc3_gadget_free_endpoints(dwc);
 err4:
        usb_put_gadget(dwc->gadget);
+       dwc->gadget = NULL;
 err3:
        dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
                        dwc->bounce_addr);
@@ -4064,6 +4068,9 @@ err0:
 
 void dwc3_gadget_exit(struct dwc3 *dwc)
 {
+       if (!dwc->gadget)
+               return;
+
        usb_del_gadget(dwc->gadget);
        dwc3_gadget_free_endpoints(dwc);
        usb_put_gadget(dwc->gadget);
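Across these gadget hunks the request bookkeeping is split in two: entries move from num_pending_sgs to num_queued_sgs as TRBs are prepared, and completion now requires both counters to reach zero. A toy model of the accounting (field names follow the hunks, the rest is simplified):

#include <stdio.h>

struct req { int num_pending_sgs, num_queued_sgs; };

static void prepare_one(struct req *r)
{
        r->num_queued_sgs++;    /* mapped into a TRB */
        r->num_pending_sgs--;
}

static void reclaim_one(struct req *r)
{
        r->num_queued_sgs--;    /* TRB completed by the controller */
}

static int completed(const struct req *r)
{
        return r->num_pending_sgs == 0 && r->num_queued_sgs == 0;
}

int main(void)
{
        struct req r = { .num_pending_sgs = 2, .num_queued_sgs = 0 };

        prepare_one(&r);
        prepare_one(&r);
        reclaim_one(&r);
        printf("done after 1 reclaim: %d\n", completed(&r));  /* 0 */
        reclaim_one(&r);
        printf("done after 2 reclaims: %d\n", completed(&r)); /* 1 */
        return 0;
}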
index 8bb2577..0550760 100644 (file)
@@ -164,6 +164,14 @@ int usb_assign_descriptors(struct usb_function *f,
 {
        struct usb_gadget *g = f->config->cdev->gadget;
 
+       /* A super-speed-plus descriptor set falls back to the super-speed
+        * one, if such a set was provided, thus avoiding a NULL pointer
+        * dereference if a 5 Gbps capable gadget is used with a 10 Gbps
+        * capable config (device port + cable + host port).
+        */
+       if (!ssp)
+               ssp = ss;
+
        if (fs) {
                f->fs_descriptors = usb_copy_descriptors(fs);
                if (!f->fs_descriptors)
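All of the f_* hunks that follow now pass the super-speed descriptor set in the SSP slot as well, and this helper change makes the same fallback automatic for any caller that still passes NULL. The essence, as a runnable model rather than the real usb_assign_descriptors():

#include <stdio.h>

static int assign_descriptors(const char *fs, const char *hs,
                              const char *ss, const char *ssp)
{
        if (!ssp)
                ssp = ss;       /* SSP falls back to SS */
        printf("fs=%s hs=%s ss=%s ssp=%s\n", fs, hs, ss, ssp);
        return 0;
}

int main(void)
{
        return assign_descriptors("fs", "hs", "ss", NULL);
}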
index 7f5cf48..ffe2486 100644 (file)
@@ -791,7 +791,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
                fs_ecm_notify_desc.bEndpointAddress;
 
        status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function,
-                       ecm_ss_function, NULL);
+                       ecm_ss_function, ecm_ss_function);
        if (status)
                goto fail;
 
index cfcc4e8..2cd9942 100644 (file)
@@ -302,7 +302,7 @@ static int eem_bind(struct usb_configuration *c, struct usb_function *f)
        eem_ss_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress;
 
        status = usb_assign_descriptors(f, eem_fs_function, eem_hs_function,
-                       eem_ss_function, NULL);
+                       eem_ss_function, eem_ss_function);
        if (status)
                goto fail;
 
@@ -495,7 +495,7 @@ static int eem_unwrap(struct gether *port,
                        skb2 = skb_clone(skb, GFP_ATOMIC);
                        if (unlikely(!skb2)) {
                                DBG(cdev, "unable to unframe EEM packet\n");
-                               continue;
+                               goto next;
                        }
                        skb_trim(skb2, len - ETH_FCS_LEN);
 
@@ -505,7 +505,7 @@ static int eem_unwrap(struct gether *port,
                                                GFP_ATOMIC);
                        if (unlikely(!skb3)) {
                                dev_kfree_skb_any(skb2);
-                               continue;
+                               goto next;
                        }
                        dev_kfree_skb_any(skb2);
                        skb_queue_tail(list, skb3);
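Switching continue to goto next matters because the loop tail performs per-frame cleanup that must run on every iteration; continue skipped it and leaked the frame being processed. A schematic with simplified ownership, not the real skb handling:

#include <stdlib.h>

struct pkt { void *data; struct pkt *next; };

static void process(struct pkt *list)
{
        for (struct pkt *p = list; p; p = p->next) {
                void *copy = malloc(16);

                if (!copy)
                        goto next;  /* "continue" would skip the cleanup */
                /* ... hand off or inspect copy ... */
                free(copy);
next:
                free(p->data);      /* cleanup that must run every time */
        }
}

int main(void) { process(NULL); return 0; }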
index bf10919..d4844af 100644 (file)
@@ -3567,6 +3567,9 @@ static void ffs_func_unbind(struct usb_configuration *c,
                ffs->func = NULL;
        }
 
+       /* Drain any pending AIO completions */
+       drain_workqueue(ffs->io_completion_wq);
+
        if (!--opts->refcnt)
                functionfs_unbind(ffs);
 
index 1125f47..e556993 100644 (file)
@@ -802,7 +802,8 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
                hidg_fs_out_ep_desc.bEndpointAddress;
 
        status = usb_assign_descriptors(f, hidg_fs_descriptors,
-                       hidg_hs_descriptors, hidg_ss_descriptors, NULL);
+                       hidg_hs_descriptors, hidg_ss_descriptors,
+                       hidg_ss_descriptors);
        if (status)
                goto fail;
 
index b56ad7c..ae41f55 100644 (file)
@@ -207,7 +207,7 @@ autoconf_fail:
        ss_loop_sink_desc.bEndpointAddress = fs_loop_sink_desc.bEndpointAddress;
 
        ret = usb_assign_descriptors(f, fs_loopback_descs, hs_loopback_descs,
-                       ss_loopback_descs, NULL);
+                       ss_loopback_descs, ss_loopback_descs);
        if (ret)
                return ret;
 
index 019bea8..8551272 100644 (file)
@@ -583,7 +583,7 @@ static void ncm_do_notify(struct f_ncm *ncm)
                data[0] = cpu_to_le32(ncm_bitrate(cdev->gadget));
                data[1] = data[0];
 
-               DBG(cdev, "notify speed %d\n", ncm_bitrate(cdev->gadget));
+               DBG(cdev, "notify speed %u\n", ncm_bitrate(cdev->gadget));
                ncm->notify_state = NCM_NOTIFY_CONNECT;
                break;
        }
@@ -1101,11 +1101,11 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
                        ncm->ndp_dgram_count = 1;
 
                        /* Note: we skip opts->next_ndp_index */
-               }
 
-               /* Delay the timer. */
-               hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
-                             HRTIMER_MODE_REL_SOFT);
+                       /* Start the timer. */
+                       hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
+                                     HRTIMER_MODE_REL_SOFT);
+               }
 
                /* Add the datagram position entries */
                ntb_ndp = skb_put_zero(ncm->skb_tx_ndp, dgram_idx_len);
index f47fdc1..59d382f 100644 (file)
@@ -1101,7 +1101,8 @@ autoconf_fail:
        ss_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress;
 
        ret = usb_assign_descriptors(f, fs_printer_function,
-                       hs_printer_function, ss_printer_function, NULL);
+                       hs_printer_function, ss_printer_function,
+                       ss_printer_function);
        if (ret)
                return ret;
 
index 0739b05..ee95e8f 100644 (file)
@@ -789,7 +789,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
        ss_notify_desc.bEndpointAddress = fs_notify_desc.bEndpointAddress;
 
        status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function,
-                       eth_ss_function, NULL);
+                       eth_ss_function, eth_ss_function);
        if (status)
                goto fail;
 
index e627138..1ed8ff0 100644 (file)
@@ -233,7 +233,7 @@ static int gser_bind(struct usb_configuration *c, struct usb_function *f)
        gser_ss_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress;
 
        status = usb_assign_descriptors(f, gser_fs_function, gser_hs_function,
-                       gser_ss_function, NULL);
+                       gser_ss_function, gser_ss_function);
        if (status)
                goto fail;
        dev_dbg(&cdev->gadget->dev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n",
index 5a201ba..1abf08e 100644 (file)
@@ -431,7 +431,8 @@ no_iso:
        ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
 
        ret = usb_assign_descriptors(f, fs_source_sink_descs,
-                       hs_source_sink_descs, ss_source_sink_descs, NULL);
+                       hs_source_sink_descs, ss_source_sink_descs,
+                       ss_source_sink_descs);
        if (ret)
                return ret;
 
index 4d94525..51c1cae 100644 (file)
@@ -358,7 +358,7 @@ geth_bind(struct usb_configuration *c, struct usb_function *f)
                fs_subset_out_desc.bEndpointAddress;
 
        status = usb_assign_descriptors(f, fs_eth_function, hs_eth_function,
-                       ss_eth_function, NULL);
+                       ss_eth_function, ss_eth_function);
        if (status)
                goto fail;
 
index 7acb507..de161ee 100644 (file)
@@ -2057,7 +2057,8 @@ static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
        uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
 
        ret = usb_assign_descriptors(f, uasp_fs_function_desc,
-                       uasp_hs_function_desc, uasp_ss_function_desc, NULL);
+                       uasp_hs_function_desc, uasp_ss_function_desc,
+                       uasp_ss_function_desc);
        if (ret)
                goto ep_fail;
 
index 0c418ce..f1b35a3 100644 (file)
@@ -1488,7 +1488,7 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
                             struct renesas_usb3_request *usb3_req)
 {
        struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
-       struct renesas_usb3_request *usb3_req_first = usb3_get_request(usb3_ep);
+       struct renesas_usb3_request *usb3_req_first;
        unsigned long flags;
        int ret = -EAGAIN;
        u32 enable_bits = 0;
@@ -1496,7 +1496,8 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
        spin_lock_irqsave(&usb3->lock, flags);
        if (usb3_ep->halt || usb3_ep->started)
                goto out;
-       if (usb3_req != usb3_req_first)
+       usb3_req_first = __usb3_get_request(usb3_ep);
+       if (!usb3_req_first || usb3_req != usb3_req_first)
                goto out;
 
        if (usb3_pn_change(usb3, usb3_ep->num) < 0)
index 7bc18cf..18c2bbd 100644 (file)
@@ -59,6 +59,7 @@
 #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI           0x1138
 #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI            0x461e
 
+#define PCI_DEVICE_ID_AMD_RENOIR_XHCI                  0x1639
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4                        0x43b9
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_3                        0x43ba
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_2                        0x43bb
@@ -182,6 +183,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
                xhci->quirks |= XHCI_U2_DISABLE_WAKE;
 
+       if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+               pdev->device == PCI_DEVICE_ID_AMD_RENOIR_XHCI)
+               xhci->quirks |= XHCI_BROKEN_D3COLD;
+
        if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
                xhci->quirks |= XHCI_LPM_SUPPORT;
                xhci->quirks |= XHCI_INTEL_HOST;
@@ -539,7 +544,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
         * Systems with the TI redriver that loses port status change events
         * need to have the registers polled during D3, so avoid D3cold.
         */
-       if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
+       if (xhci->quirks & (XHCI_COMP_MODE_QUIRK | XHCI_BROKEN_D3COLD))
                pci_d3cold_disable(pdev);
 
        if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
index a8e4189..6acd232 100644 (file)
@@ -828,14 +828,10 @@ static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
        list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
                                 cancelled_td_list) {
 
-               /*
-                * Doesn't matter what we pass for status, since the core will
-                * just overwrite it (because the URB has been unlinked).
-                */
                ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
 
                if (td->cancel_status == TD_CLEARED)
-                       xhci_td_cleanup(ep->xhci, td, ring, 0);
+                       xhci_td_cleanup(ep->xhci, td, ring, td->status);
 
                if (ep->xhci->xhc_state & XHCI_STATE_DYING)
                        return;
@@ -937,14 +933,18 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
                        continue;
                }
                /*
-                * If ring stopped on the TD we need to cancel, then we have to
+                * If a ring stopped on the TD we need to cancel, then we have to
                 * move the xHC endpoint ring dequeue pointer past this TD.
+                * Rings halted due to STALL may show hw_deq is past the stalled
+                * TD, but still require a set TR Deq command to flush xHC cache.
                 */
                hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
                                         td->urb->stream_id);
                hw_deq &= ~0xf;
 
-               if (trb_in_td(xhci, td->start_seg, td->first_trb,
+               if (td->cancel_status == TD_HALTED) {
+                       cached_td = td;
+               } else if (trb_in_td(xhci, td->start_seg, td->first_trb,
                              td->last_trb, hw_deq, false)) {
                        switch (td->cancel_status) {
                        case TD_CLEARED: /* TD is already no-op */
index 2595a8f..e417f5c 100644 (file)
@@ -1892,6 +1892,7 @@ struct xhci_hcd {
 #define XHCI_DISABLE_SPARSE    BIT_ULL(38)
 #define XHCI_SG_TRB_CACHE_SIZE_QUIRK   BIT_ULL(39)
 #define XHCI_NO_SOFT_RETRY     BIT_ULL(40)
+#define XHCI_BROKEN_D3COLD     BIT_ULL(41)
 
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
index b3cfe86..3366530 100644 (file)
@@ -263,6 +263,8 @@ static int __init brcmstb_usb_pinmap_probe(struct platform_device *pdev)
                return -EINVAL;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!r)
+               return -EINVAL;
 
        pdata = devm_kzalloc(&pdev->dev,
                             sizeof(*pdata) +
index a3dfc77..26baba3 100644 (file)
@@ -61,9 +61,9 @@ static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
        /* Set speed */
        retval = usb_control_msg(tv->udev, usb_sndctrlpipe(tv->udev, 0),
                                 0x01, /* vendor request: set speed */
-                                USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+                                USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
                                 tv->speed, /* speed value */
-                                0, NULL, 0, USB_CTRL_GET_TIMEOUT);
+                                0, NULL, 0, USB_CTRL_SET_TIMEOUT);
        if (retval) {
                tv->speed = old;
                dev_dbg(&tv->udev->dev, "retval = %d\n", retval);
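The fix aligns the three things that must agree for a data-less control request: the send pipe, an OUT direction bit in bmRequestType, and the set-style timeout. The USB core warns when the pipe direction and the request-type direction bit disagree; a minimal model of that consistency check:

#include <stdio.h>

#define USB_DIR_IN 0x80

enum pipe_dir { PIPE_OUT, PIPE_IN };

static int dir_matches(enum pipe_dir pipe, unsigned char reqtype)
{
        return (pipe == PIPE_IN) == !!(reqtype & USB_DIR_IN);
}

int main(void)
{
        unsigned char vendor_out = 0x40 | 0x02; /* TYPE_VENDOR | RECIP_OTHER */

        printf("old: %d\n", dir_matches(PIPE_OUT, USB_DIR_IN | vendor_out)); /* 0 */
        printf("new: %d\n", dir_matches(PIPE_OUT, vendor_out));              /* 1 */
        return 0;
}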
index b5d6616..748139d 100644 (file)
@@ -736,6 +736,7 @@ static int uss720_probe(struct usb_interface *intf,
        parport_announce_port(pp);
 
        usb_set_intfdata(intf, pp);
+       usb_put_dev(usbdev);
        return 0;
 
 probe_abort:
index 8f09a38..4c8f011 100644 (file)
@@ -2009,9 +2009,8 @@ static void musb_pm_runtime_check_session(struct musb *musb)
                        schedule_delayed_work(&musb->irq_work,
                                              msecs_to_jiffies(1000));
                        musb->quirk_retries--;
-                       break;
                }
-               fallthrough;
+               break;
        case MUSB_QUIRK_B_INVALID_VBUS_91:
                if (musb->quirk_retries && !musb->flush_irq_work) {
                        musb_dbg(musb,
index ee595d1..fcb812b 100644 (file)
@@ -252,9 +252,11 @@ struct cp210x_serial_private {
        u8                      gpio_input;
 #endif
        u8                      partnum;
+       u32                     fw_version;
        speed_t                 min_speed;
        speed_t                 max_speed;
        bool                    use_actual_rate;
+       bool                    no_flow_control;
 };
 
 enum cp210x_event_state {
@@ -398,6 +400,7 @@ struct cp210x_special_chars {
 
 /* CP210X_VENDOR_SPECIFIC values */
 #define CP210X_READ_2NCONFIG   0x000E
+#define CP210X_GET_FW_VER_2N   0x0010
 #define CP210X_READ_LATCH      0x00C2
 #define CP210X_GET_PARTNUM     0x370B
 #define CP210X_GET_PORTCONFIG  0x370C
@@ -537,6 +540,12 @@ struct cp210x_single_port_config {
 #define CP210X_2NCONFIG_GPIO_RSTLATCH_IDX      587
 #define CP210X_2NCONFIG_GPIO_CONTROL_IDX       600
 
+/* CP2102N QFN20 port configuration values */
+#define CP2102N_QFN20_GPIO2_TXLED_MODE         BIT(2)
+#define CP2102N_QFN20_GPIO3_RXLED_MODE         BIT(3)
+#define CP2102N_QFN20_GPIO1_RS485_MODE         BIT(4)
+#define CP2102N_QFN20_GPIO0_CLK_MODE           BIT(6)
+
 /* CP210X_VENDOR_SPECIFIC, CP210X_WRITE_LATCH call writes these 0x2 bytes. */
 struct cp210x_gpio_write {
        u8      mask;
@@ -1122,6 +1131,7 @@ static bool cp210x_termios_change(const struct ktermios *a, const struct ktermio
 static void cp210x_set_flow_control(struct tty_struct *tty,
                struct usb_serial_port *port, struct ktermios *old_termios)
 {
+       struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
        struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
        struct cp210x_special_chars chars;
        struct cp210x_flow_ctl flow_ctl;
@@ -1129,6 +1139,15 @@ static void cp210x_set_flow_control(struct tty_struct *tty,
        u32 ctl_hs;
        int ret;
 
+       /*
+        * Some CP2102N revisions interpret ulXonLimit as ulFlowReplace (see
+        * erratum CP2102N_E104). Report back that flow control is not supported.
+        */
+       if (priv->no_flow_control) {
+               tty->termios.c_cflag &= ~CRTSCTS;
+               tty->termios.c_iflag &= ~(IXON | IXOFF);
+       }
+
        if (old_termios &&
                        C_CRTSCTS(tty) == (old_termios->c_cflag & CRTSCTS) &&
                        I_IXON(tty) == (old_termios->c_iflag & IXON) &&
@@ -1185,19 +1204,20 @@ static void cp210x_set_flow_control(struct tty_struct *tty,
                port_priv->crtscts = false;
        }
 
-       if (I_IXOFF(tty))
+       if (I_IXOFF(tty)) {
                flow_repl |= CP210X_SERIAL_AUTO_RECEIVE;
-       else
+
+               flow_ctl.ulXonLimit = cpu_to_le32(128);
+               flow_ctl.ulXoffLimit = cpu_to_le32(128);
+       } else {
                flow_repl &= ~CP210X_SERIAL_AUTO_RECEIVE;
+       }
 
        if (I_IXON(tty))
                flow_repl |= CP210X_SERIAL_AUTO_TRANSMIT;
        else
                flow_repl &= ~CP210X_SERIAL_AUTO_TRANSMIT;
 
-       flow_ctl.ulXonLimit = cpu_to_le32(128);
-       flow_ctl.ulXoffLimit = cpu_to_le32(128);
-
        dev_dbg(&port->dev, "%s - ctrl = 0x%02x, flow = 0x%02x\n", __func__,
                        ctl_hs, flow_repl);
 
@@ -1733,7 +1753,19 @@ static int cp2102n_gpioconf_init(struct usb_serial *serial)
        priv->gpio_pushpull = (gpio_pushpull >> 3) & 0x0f;
 
        /* 0 indicates GPIO mode, 1 is alternate function */
-       priv->gpio_altfunc = (gpio_ctrl >> 2) & 0x0f;
+       if (priv->partnum == CP210X_PARTNUM_CP2102N_QFN20) {
+               /* QFN20 is special... */
+               if (gpio_ctrl & CP2102N_QFN20_GPIO0_CLK_MODE)   /* GPIO 0 */
+                       priv->gpio_altfunc |= BIT(0);
+               if (gpio_ctrl & CP2102N_QFN20_GPIO1_RS485_MODE) /* GPIO 1 */
+                       priv->gpio_altfunc |= BIT(1);
+               if (gpio_ctrl & CP2102N_QFN20_GPIO2_TXLED_MODE) /* GPIO 2 */
+                       priv->gpio_altfunc |= BIT(2);
+               if (gpio_ctrl & CP2102N_QFN20_GPIO3_RXLED_MODE) /* GPIO 3 */
+                       priv->gpio_altfunc |= BIT(3);
+       } else {
+               priv->gpio_altfunc = (gpio_ctrl >> 2) & 0x0f;
+       }
 
        if (priv->partnum == CP210X_PARTNUM_CP2102N_QFN28) {
                /*
@@ -1908,6 +1940,45 @@ static void cp210x_init_max_speed(struct usb_serial *serial)
        priv->use_actual_rate = use_actual_rate;
 }
 
+static int cp210x_get_fw_version(struct usb_serial *serial, u16 value)
+{
+       struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+       u8 ver[3];
+       int ret;
+
+       ret = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, value,
+                       ver, sizeof(ver));
+       if (ret)
+               return ret;
+
+       dev_dbg(&serial->interface->dev, "%s - %d.%d.%d\n", __func__,
+                       ver[0], ver[1], ver[2]);
+
+       priv->fw_version = ver[0] << 16 | ver[1] << 8 | ver[2];
+
+       return 0;
+}
+
+static void cp210x_determine_quirks(struct usb_serial *serial)
+{
+       struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+       int ret;
+
+       switch (priv->partnum) {
+       case CP210X_PARTNUM_CP2102N_QFN28:
+       case CP210X_PARTNUM_CP2102N_QFN24:
+       case CP210X_PARTNUM_CP2102N_QFN20:
+               ret = cp210x_get_fw_version(serial, CP210X_GET_FW_VER_2N);
+               if (ret)
+                       break;
+               if (priv->fw_version <= 0x10004)
+                       priv->no_flow_control = true;
+               break;
+       default:
+               break;
+       }
+}
+
 static int cp210x_attach(struct usb_serial *serial)
 {
        int result;
@@ -1928,6 +1999,7 @@ static int cp210x_attach(struct usb_serial *serial)
 
        usb_set_serial_data(serial, priv);
 
+       cp210x_determine_quirks(serial);
        cp210x_init_max_speed(serial);
 
        result = cp210x_gpio_init(serial);
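The quirk gate packs the three firmware version bytes into a single integer so the affected range becomes one comparison: 0x10004 is version 1.0.4. A runnable check of the packing, assuming the same byte order as the hunk above:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_ver(uint8_t major, uint8_t minor, uint8_t patch)
{
        return (uint32_t)major << 16 | (uint32_t)minor << 8 | patch;
}

int main(void)
{
        printf("1.0.4 affected: %d\n", pack_ver(1, 0, 4) <= 0x10004); /* 1 */
        printf("1.0.5 affected: %d\n", pack_ver(1, 0, 5) <= 0x10004); /* 0 */
        return 0;
}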
index 6f2659e..4a1f3a9 100644 (file)
@@ -611,6 +611,7 @@ static const struct usb_device_id id_table_combined[] = {
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONMX_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
@@ -1034,6 +1035,9 @@ static const struct usb_device_id id_table_combined[] = {
        /* Sienna devices */
        { USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
        { USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
+       /* IDS GmbH devices */
+       { USB_DEVICE(IDS_VID, IDS_SI31A_PID) },
+       { USB_DEVICE(IDS_VID, IDS_CM31A_PID) },
        /* U-Blox devices */
        { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
        { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
index 3d47c6d..add602b 100644 (file)
 #define FTDI_NT_ORIONLXM_PID           0x7c90  /* OrionLXm Substation Automation Platform */
 #define FTDI_NT_ORIONLX_PLUS_PID       0x7c91  /* OrionLX+ Substation Automation Platform */
 #define FTDI_NT_ORION_IO_PID           0x7c92  /* Orion I/O */
+#define FTDI_NT_ORIONMX_PID            0x7c93  /* OrionMX */
 
 /*
  * Synapse Wireless product ids (FTDI_VID)
 #define UNJO_ISODEBUG_V1_PID           0x150D
 
 /*
+ * IDS GmbH
+ */
+#define IDS_VID                                0x2CAF
+#define IDS_SI31A_PID                  0x13A2
+#define IDS_CM31A_PID                  0x13A3
+
+/*
  * U-Blox products (http://www.u-blox.com).
  */
 #define UBLOX_VID                      0x1546
index 83c62f9..41f1b87 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * USB ZyXEL omni.net LCD PLUS driver
+ * USB ZyXEL omni.net driver
  *
  * Copyright (C) 2013,2017 Johan Hovold <johan@kernel.org>
  *
 #include <linux/usb/serial.h>
 
 #define DRIVER_AUTHOR "Alessandro Zummo"
-#define DRIVER_DESC "USB ZyXEL omni.net LCD PLUS Driver"
+#define DRIVER_DESC "USB ZyXEL omni.net Driver"
 
 #define ZYXEL_VENDOR_ID                0x0586
 #define ZYXEL_OMNINET_ID       0x1000
+#define ZYXEL_OMNI_56K_PLUS_ID 0x1500
 /* This one seems to be a re-branded ZyXEL device */
 #define BT_IGNITIONPRO_ID      0x2000
 
@@ -40,6 +41,7 @@ static void omninet_port_remove(struct usb_serial_port *port);
 
 static const struct usb_device_id id_table[] = {
        { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
+       { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNI_56K_PLUS_ID) },
        { USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) },
        { }                                             /* Terminating entry */
 };
@@ -50,7 +52,7 @@ static struct usb_serial_driver zyxel_omninet_device = {
                .owner =        THIS_MODULE,
                .name =         "omninet",
        },
-       .description =          "ZyXEL - omni.net lcd plus usb",
+       .description =          "ZyXEL - omni.net usb",
        .id_table =             id_table,
        .num_bulk_out =         2,
        .calc_num_ports =       omninet_calc_num_ports,
index 3e79a54..7608584 100644 (file)
@@ -1240,6 +1240,10 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff),    /* Telit LN940 (MBIM) */
          .driver_info = NCTRL(0) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7010, 0xff),    /* Telit LE910-S1 (RNDIS) */
+         .driver_info = NCTRL(2) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff),    /* Telit LE910-S1 (ECM) */
+         .driver_info = NCTRL(2) },
        { USB_DEVICE(TELIT_VENDOR_ID, 0x9010),                          /* Telit SBL FN980 flashing device */
          .driver_info = NCTRL(0) | ZLP },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
index fd773d2..940050c 100644 (file)
@@ -113,6 +113,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
        { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
        { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
+       { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530GC_PRODUCT_ID) },
        { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
        { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
        { }                                     /* Terminating entry */
index 0f681dd..6097ee8 100644 (file)
 /* ADLINK ND-6530 RS232,RS485 and RS422 adapter */
 #define ADLINK_VENDOR_ID               0x0b63
 #define ADLINK_ND6530_PRODUCT_ID       0x6530
+#define ADLINK_ND6530GC_PRODUCT_ID     0x653a
 
 /* SMART USB Serial Adapter */
 #define SMART_VENDOR_ID        0x0b8c
index 5f2e7f6..067690d 100644 (file)
@@ -416,7 +416,7 @@ static void qt2_close(struct usb_serial_port *port)
 
        /* flush the port transmit buffer */
        i = usb_control_msg(serial->dev,
-                           usb_rcvctrlpipe(serial->dev, 0),
+                           usb_sndctrlpipe(serial->dev, 0),
                            QT2_FLUSH_DEVICE, 0x40, 1,
                            port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT);
 
@@ -426,7 +426,7 @@ static void qt2_close(struct usb_serial_port *port)
 
        /* flush the port receive buffer */
        i = usb_control_msg(serial->dev,
-                           usb_rcvctrlpipe(serial->dev, 0),
+                           usb_sndctrlpipe(serial->dev, 0),
                            QT2_FLUSH_DEVICE, 0x40, 0,
                            port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT);
 
@@ -639,7 +639,7 @@ static int qt2_attach(struct usb_serial *serial)
        int status;
 
        /* power on unit */
-       status = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+       status = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
                                 0xc2, 0x40, 0x8000, 0, NULL, 0,
                                 QT2_USB_TIMEOUT);
        if (status < 0) {
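
Editor's note on the three qt2 hunks above: each one flips the control pipe from receive to send. The rule being enforced, annotated on the patch's own call (a fragment, not standalone code): the pipe direction passed to usb_control_msg() must match the direction bit of the request type, and 0x40 is USB_DIR_OUT | USB_TYPE_VENDOR, a host-to-device request with no data stage.

	/* 0x40 = USB_DIR_OUT | USB_TYPE_VENDOR: host-to-device, so the
	 * send pipe is the matching one; newer kernels reject mismatches. */
	status = usb_control_msg(serial->dev,
				 usb_sndctrlpipe(serial->dev, 0), /* OUT */
				 0xc2, 0x40, 0x8000, 0,
				 NULL, 0, /* no data stage */
				 QT2_USB_TIMEOUT);
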
index caa46ac..310db5a 100644 (file)
@@ -37,6 +37,7 @@
 /* Vendor and product ids */
 #define TI_VENDOR_ID                   0x0451
 #define IBM_VENDOR_ID                  0x04b3
+#define STARTECH_VENDOR_ID             0x14b0
 #define TI_3410_PRODUCT_ID             0x3410
 #define IBM_4543_PRODUCT_ID            0x4543
 #define IBM_454B_PRODUCT_ID            0x454b
@@ -370,6 +371,7 @@ static const struct usb_device_id ti_id_table_3410[] = {
        { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
        { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
        { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
+       { USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) },
        { }     /* terminator */
 };
 
@@ -408,6 +410,7 @@ static const struct usb_device_id ti_id_table_combined[] = {
        { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
        { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
        { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
+       { USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) },
        { }     /* terminator */
 };
 
index 9da22ae..77dabd3 100644 (file)
@@ -191,6 +191,7 @@ static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id,
        bool match;
        int nval;
        u16 *val;
+       int ret;
        int i;
 
        /*
@@ -218,10 +219,10 @@ static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id,
        if (!val)
                return ERR_PTR(-ENOMEM);
 
-       nval = fwnode_property_read_u16_array(fwnode, "svid", val, nval);
-       if (nval < 0) {
+       ret = fwnode_property_read_u16_array(fwnode, "svid", val, nval);
+       if (ret < 0) {
                kfree(val);
-               return ERR_PTR(nval);
+               return ERR_PTR(ret);
        }
 
        for (i = 0; i < nval; i++) {
@@ -238,7 +239,7 @@ find_mux:
        dev = class_find_device(&typec_mux_class, NULL, fwnode,
                                mux_fwnode_match);
 
-       return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER);
+       return dev ? to_typec_mux(dev) : ERR_PTR(-EPROBE_DEFER);
 }
 
 /**
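
Editor's note on the typec mux hunk above, with a standalone demo (names are illustrative, not from the kernel): the old code reused nval, the element count, to hold the call's return value. Since fwnode_property_read_u16_array() returns 0 on success, the later loop over the array ran zero times; keeping a separate status variable preserves the count.

#include <stdio.h>

/* Stand-in for fwnode_property_read_u16_array(): fills the array and
 * returns a status (0 on success), not the element count. */
static int read_u16_array(unsigned short *val, int nval)
{
	for (int i = 0; i < nval; i++)
		val[i] = (unsigned short)(i + 1);
	return 0;
}

int main(void)
{
	unsigned short val[4];
	int nval = 4;
	/* Buggy shape was: nval = read_u16_array(val, nval);  success (0)
	 * clobbers the count, so the loop below would run zero times. */
	int ret = read_u16_array(val, nval);

	if (ret < 0)
		return 1;
	for (int i = 0; i < nval; i++)
		printf("%u ", val[i]);
	printf("\n");
	return 0;
}
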
index 46a25b8..ffa8aa1 100644 (file)
@@ -582,10 +582,15 @@ static int pmc_usb_probe_iom(struct pmc_usb *pmc)
        acpi_dev_free_resource_list(&resource_list);
 
        if (!pmc->iom_base) {
-               put_device(&adev->dev);
+               acpi_dev_put(adev);
                return -ENOMEM;
        }
 
+       if (IS_ERR(pmc->iom_base)) {
+               acpi_dev_put(adev);
+               return PTR_ERR(pmc->iom_base);
+       }
+
        pmc->iom_adev = adev;
 
        return 0;
@@ -636,8 +641,10 @@ static int pmc_usb_probe(struct platform_device *pdev)
                        break;
 
                ret = pmc_usb_register_port(pmc, i, fwnode);
-               if (ret)
+               if (ret) {
+                       fwnode_handle_put(fwnode);
                        goto err_remove_ports;
+               }
        }
 
        platform_set_drvdata(pdev, pmc);
@@ -651,7 +658,7 @@ err_remove_ports:
                usb_role_switch_unregister(pmc->port[i].usb_sw);
        }
 
-       put_device(&pmc->iom_adev->dev);
+       acpi_dev_put(pmc->iom_adev);
 
        return ret;
 }
@@ -667,7 +674,7 @@ static int pmc_usb_remove(struct platform_device *pdev)
                usb_role_switch_unregister(pmc->port[i].usb_sw);
        }
 
-       put_device(&pmc->iom_adev->dev);
+       acpi_dev_put(pmc->iom_adev);
 
        return 0;
 }
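
Editor's note on the two fixes above, both reference-count pairings: device_for_each_child_node() holds a reference on the child it hands to each iteration, so an early return from the loop body must drop it, and an acpi_device reference is released with acpi_dev_put() rather than a bare put_device(). A schematic of the loop-side rule (fragment; do_register() is hypothetical):

	struct fwnode_handle *fwnode;
	int ret;

	device_for_each_child_node(dev, fwnode) {
		ret = do_register(fwnode);
		if (ret) {
			/* drop the reference held for this iteration */
			fwnode_handle_put(fwnode);
			return ret;
		}
	}
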
index 64133e5..63470cf 100644 (file)
@@ -401,6 +401,8 @@ struct tcpm_port {
        unsigned int nr_src_pdo;
        u32 snk_pdo[PDO_MAX_OBJECTS];
        unsigned int nr_snk_pdo;
+       u32 snk_vdo_v1[VDO_MAX_OBJECTS];
+       unsigned int nr_snk_vdo_v1;
        u32 snk_vdo[VDO_MAX_OBJECTS];
        unsigned int nr_snk_vdo;
 
@@ -1547,33 +1549,43 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
                        if (PD_VDO_VID(p[0]) != USB_SID_PD)
                                break;
 
-                       if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
+                       if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
                                typec_partner_set_svdm_version(port->partner,
                                                               PD_VDO_SVDM_VER(p[0]));
-                       /* 6.4.4.3.1: Only respond as UFP (device) */
-                       if (port->data_role == TYPEC_DEVICE &&
+                               svdm_version = PD_VDO_SVDM_VER(p[0]);
+                       }
+
+                       port->ams = DISCOVER_IDENTITY;
+                       /*
+                        * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host)
+                        * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or
+                        * "wrong configuration" or "Unrecognized"
+                        */
+                       if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) &&
                            port->nr_snk_vdo) {
-                               /*
-                                * Product Type DFP and Connector Type are not defined in SVDM
-                                * version 1.0 and shall be set to zero.
-                                */
-                               if (typec_get_negotiated_svdm_version(typec) < SVDM_VER_2_0)
-                                       response[1] = port->snk_vdo[0] & ~IDH_DFP_MASK
-                                                     & ~IDH_CONN_MASK;
-                               else
-                                       response[1] = port->snk_vdo[0];
-                               for (i = 1; i <  port->nr_snk_vdo; i++)
-                                       response[i + 1] = port->snk_vdo[i];
-                               rlen = port->nr_snk_vdo + 1;
+                               if (svdm_version < SVDM_VER_2_0) {
+                                       for (i = 0; i < port->nr_snk_vdo_v1; i++)
+                                               response[i + 1] = port->snk_vdo_v1[i];
+                                       rlen = port->nr_snk_vdo_v1 + 1;
+
+                               } else {
+                                       for (i = 0; i < port->nr_snk_vdo; i++)
+                                               response[i + 1] = port->snk_vdo[i];
+                                       rlen = port->nr_snk_vdo + 1;
+                               }
                        }
                        break;
                case CMD_DISCOVER_SVID:
+                       port->ams = DISCOVER_SVIDS;
                        break;
                case CMD_DISCOVER_MODES:
+                       port->ams = DISCOVER_MODES;
                        break;
                case CMD_ENTER_MODE:
+                       port->ams = DFP_TO_UFP_ENTER_MODE;
                        break;
                case CMD_EXIT_MODE:
+                       port->ams = DFP_TO_UFP_EXIT_MODE;
                        break;
                case CMD_ATTENTION:
                        /* Attention command does not have response */
@@ -1930,6 +1942,9 @@ static void vdm_run_state_machine(struct tcpm_port *port)
                        tcpm_log(port, "VDM Tx error, retry");
                        port->vdm_retries++;
                        port->vdm_state = VDM_STATE_READY;
+                       if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT)
+                               tcpm_ams_finish(port);
+               } else {
                        tcpm_ams_finish(port);
                }
                break;
@@ -2176,20 +2191,25 @@ static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
 
        if (!type) {
                tcpm_log(port, "Alert message received with no type");
+               tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
                return;
        }
 
        /* Just handling non-battery alerts for now */
        if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) {
-               switch (port->state) {
-               case SRC_READY:
-               case SNK_READY:
+               if (port->pwr_role == TYPEC_SOURCE) {
+                       port->upcoming_state = GET_STATUS_SEND;
+                       tcpm_ams_start(port, GETTING_SOURCE_SINK_STATUS);
+               } else {
+                       /*
+                        * Do not check SinkTxOk here in case the Source doesn't set its Rp to
+                        * SinkTxOk in time.
+                        */
+                       port->ams = GETTING_SOURCE_SINK_STATUS;
                        tcpm_set_state(port, GET_STATUS_SEND, 0);
-                       break;
-               default:
-                       tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
-                       break;
                }
+       } else {
+               tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
        }
 }
 
@@ -2287,6 +2307,12 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
        bool frs_enable;
        int ret;
 
+       if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) {
+               port->vdm_state = VDM_STATE_ERR_BUSY;
+               tcpm_ams_finish(port);
+               mod_vdm_delayed_work(port, 0);
+       }
+
        switch (type) {
        case PD_DATA_SOURCE_CAP:
                for (i = 0; i < cnt; i++)
@@ -2417,14 +2443,22 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
                                           NONE_AMS);
                break;
        case PD_DATA_VENDOR_DEF:
-               tcpm_handle_vdm_request(port, msg->payload, cnt);
+               if (tcpm_vdm_ams(port) || port->nr_snk_vdo)
+                       tcpm_handle_vdm_request(port, msg->payload, cnt);
+               else if (port->negotiated_rev > PD_REV20)
+                       tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
                break;
        case PD_DATA_BIST:
                port->bist_request = le32_to_cpu(msg->payload[0]);
                tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
                break;
        case PD_DATA_ALERT:
-               tcpm_handle_alert(port, msg->payload, cnt);
+               if (port->state != SRC_READY && port->state != SNK_READY)
+                       tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
+                                            SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
+                                            NONE_AMS, 0);
+               else
+                       tcpm_handle_alert(port, msg->payload, cnt);
                break;
        case PD_DATA_BATT_STATUS:
        case PD_DATA_GET_COUNTRY_INFO:
@@ -2459,6 +2493,16 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
        enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
        enum tcpm_state next_state;
 
+       /*
+        * Stop the VDM state machine when interrupted by other Messages.
+        * NOT_SUPP is allowed while a VDM AMS is waiting for VDM responses
+        * and is handled later.
+        */
+       if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) {
+               port->vdm_state = VDM_STATE_ERR_BUSY;
+               tcpm_ams_finish(port);
+               mod_vdm_delayed_work(port, 0);
+       }
+
        switch (type) {
        case PD_CTRL_GOOD_CRC:
        case PD_CTRL_PING:
@@ -2717,7 +2761,14 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
        enum pd_ext_msg_type type = pd_header_type_le(msg->header);
        unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);
 
-       if (!(msg->ext_msg.header & PD_EXT_HDR_CHUNKED)) {
+       /* Stop the VDM state machine if interrupted by other Messages */
+       if (tcpm_vdm_ams(port)) {
+               port->vdm_state = VDM_STATE_ERR_BUSY;
+               tcpm_ams_finish(port);
+               mod_vdm_delayed_work(port, 0);
+       }
+
+       if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) {
                tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
                tcpm_log(port, "Unchunked extended messages unsupported");
                return;
@@ -2731,24 +2782,16 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
 
        switch (type) {
        case PD_EXT_STATUS:
-               /*
-                * If PPS related events raised then get PPS status to clear
-                * (see USB PD 3.0 Spec, 6.5.2.4)
-                */
-               if (msg->ext_msg.data[USB_PD_EXT_SDB_EVENT_FLAGS] &
-                   USB_PD_EXT_SDB_PPS_EVENTS)
-                       tcpm_pd_handle_state(port, GET_PPS_STATUS_SEND,
-                                            GETTING_SOURCE_SINK_STATUS, 0);
-
-               else
-                       tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
-               break;
        case PD_EXT_PPS_STATUS:
-               /*
-                * For now the PPS status message is used to clear events
-                * and nothing more.
-                */
-               tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
+               if (port->ams == GETTING_SOURCE_SINK_STATUS) {
+                       tcpm_ams_finish(port);
+                       tcpm_set_state(port, ready_state(port), 0);
+               } else {
+                       /* unexpected Status or PPS_Status Message */
+                       tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
+                                            SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
+                                            NONE_AMS, 0);
+               }
                break;
        case PD_EXT_SOURCE_CAP_EXT:
        case PD_EXT_GET_BATT_CAP:
@@ -2811,7 +2854,7 @@ static void tcpm_pd_rx_handler(struct kthread_work *work)
                                 "Data role mismatch, initiating error recovery");
                        tcpm_set_state(port, ERROR_RECOVERY, 0);
                } else {
-                       if (msg->header & PD_HEADER_EXT_HDR)
+                       if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
                                tcpm_pd_ext_msg_request(port, msg);
                        else if (cnt)
                                tcpm_pd_data_request(port, msg);
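
Editor's note on this le16_to_cpu() fix and the earlier one on ext_msg.header: the PD message header arrives in little-endian wire order, while PD_HEADER_EXT_HDR is a host-order bit mask, so the header must be converted before testing. A standalone demonstration of what the conversion guarantees:

#include <stdint.h>
#include <stdio.h>

#define PD_HEADER_EXT_HDR	(1 << 15)	/* host-order mask */

int main(void)
{
	const uint8_t wire[2] = { 0x00, 0x80 };	/* LE encoding of 0x8000 */
	/* What le16_to_cpu() amounts to: assemble LSB first, which yields
	 * the same value on little- and big-endian hosts alike. */
	uint16_t header = (uint16_t)(wire[0] | (wire[1] << 8));

	printf("extended header bit set: %d\n",
	       !!(header & PD_HEADER_EXT_HDR));
	return 0;
}
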
@@ -5914,6 +5957,22 @@ sink:
                        return ret;
        }
 
+       /* If sink-vdos is found, sink-vdos-v1 is expected for backward compatibility. */
+       if (port->nr_snk_vdo) {
+               ret = fwnode_property_count_u32(fwnode, "sink-vdos-v1");
+               if (ret < 0)
+                       return ret;
+               else if (ret == 0)
+                       return -ENODATA;
+
+               port->nr_snk_vdo_v1 = min(ret, VDO_MAX_OBJECTS);
+               ret = fwnode_property_read_u32_array(fwnode, "sink-vdos-v1",
+                                                    port->snk_vdo_v1,
+                                                    port->nr_snk_vdo_v1);
+               if (ret < 0)
+                       return ret;
+       }
+
        return 0;
 }
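
Editor's note, a sketch of the count-then-clamp idiom used by the sink-vdos-v1 parsing above (fragment; "example-prop" and dst are illustrative): count the elements first, clamp to the fixed-size destination, then read exactly that many, so an oversized firmware property can never overrun the array.

	ret = fwnode_property_count_u32(fwnode, "example-prop");
	if (ret < 0)
		return ret;
	if (ret == 0)
		return -ENODATA;

	n = min(ret, VDO_MAX_OBJECTS);	/* never read past dst[] */
	ret = fwnode_property_read_u32_array(fwnode, "example-prop", dst, n);
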
 
@@ -6279,6 +6338,11 @@ void tcpm_unregister_port(struct tcpm_port *port)
 {
        int i;
 
+       hrtimer_cancel(&port->send_discover_timer);
+       hrtimer_cancel(&port->enable_frs_timer);
+       hrtimer_cancel(&port->vdm_state_machine_timer);
+       hrtimer_cancel(&port->state_machine_timer);
+
        tcpm_reset_port(port);
        for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
                typec_unregister_altmode(port->port_altmode[i]);
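
Editor's note on the ordering above: hrtimer_cancel() is synchronous and waits for a callback that is already executing to finish, so cancelling all four timers before tcpm_reset_port() guarantees no timer handler can touch the port while it is being torn down. The general shape (fragment):

	/* Cancel first, then tear down what the handlers touch. */
	hrtimer_cancel(&port->state_machine_timer);
	tcpm_reset_port(port);	/* safe: the callback cannot run anymore */
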
index 79ae639..5d12533 100644 (file)
@@ -378,7 +378,7 @@ static int wcove_pd_transmit(struct tcpc_dev *tcpc,
                const u8 *data = (void *)msg;
                int i;
 
-               for (i = 0; i < pd_header_cnt(msg->header) * 4 + 2; i++) {
+               for (i = 0; i < pd_header_cnt_le(msg->header) * 4 + 2; i++) {
                        ret = regmap_write(wcove->regmap, USBC_TX_DATA + i,
                                           data[i]);
                        if (ret)
index 1d8b7df..b7d104c 100644 (file)
@@ -717,8 +717,8 @@ static void ucsi_handle_connector_change(struct work_struct *work)
        ucsi_send_command(con->ucsi, command, NULL, 0);
 
        /* 3. ACK connector change */
-       clear_bit(EVENT_PENDING, &ucsi->flags);
        ret = ucsi_acknowledge_connector_change(ucsi);
+       clear_bit(EVENT_PENDING, &ucsi->flags);
        if (ret) {
                dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
                goto out_unlock;
@@ -1253,6 +1253,7 @@ err_unregister:
        }
 
 err_reset:
+       memset(&ucsi->cap, 0, sizeof(ucsi->cap));
        ucsi_reset_ppm(ucsi);
 err:
        return ret;
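
Editor's note on the ucsi reordering above (first hunk), as I read it: keeping EVENT_PENDING set until the acknowledgment has completed means a connector interrupt that fires mid-ACK cannot re-queue the handler against a change that is still being acknowledged. In outline (fragment):

	ret = ucsi_acknowledge_connector_change(ucsi);	/* 1: finish the ACK */
	clear_bit(EVENT_PENDING, &ucsi->flags);		/* 2: then re-arm */
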
index 53ce78d..5e2e1b9 100644 (file)
@@ -2,6 +2,7 @@
 config VFIO_PCI
        tristate "VFIO support for PCI devices"
        depends on VFIO && PCI && EVENTFD
+       depends on MMU
        select VFIO_VIRQFD
        select IRQ_BYPASS_MANAGER
        help
index d57f037..70e28ef 100644 (file)
@@ -1581,7 +1581,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
                        if (len == 0xFF) {
                                len = vfio_ext_cap_len(vdev, ecap, epos);
                                if (len < 0)
-                                       return ret;
+                                       return len;
                        }
                }
 
index 361e5b5..470fcf7 100644 (file)
@@ -291,7 +291,7 @@ err_irq:
        vfio_platform_regions_cleanup(vdev);
 err_reg:
        mutex_unlock(&driver_lock);
-       module_put(THIS_MODULE);
+       module_put(vdev->parent_module);
        return ret;
 }
 
index a0747c3..a3e925a 100644 (file)
@@ -2795,7 +2795,7 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
                return 0;
        }
 
-       size = sizeof(*cap_iovas) + (iovas * sizeof(*cap_iovas->iova_ranges));
+       size = struct_size(cap_iovas, iova_ranges, iovas);
 
        cap_iovas = kzalloc(size, GFP_KERNEL);
        if (!cap_iovas)
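
Editor's note on the struct_size() conversion above: the helper computes sizeof(header) plus n elements of the flexible array with overflow checking, replacing open-coded arithmetic that can wrap. A standalone sketch of the same layout math (minus the kernel macro's overflow saturation):

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the kernel struct with a flexible array. */
struct cap_iovas {
	unsigned int nr_iovas;
	struct {
		unsigned long long start;
		unsigned long long end;
	} iova_ranges[];
};

int main(void)
{
	size_t iovas = 3;
	size_t size = sizeof(struct cap_iovas)
		      + iovas * sizeof(((struct cap_iovas *)0)->iova_ranges[0]);
	struct cap_iovas *caps = calloc(1, size);

	if (!caps)
		return 1;
	caps->nr_iovas = (unsigned int)iovas;
	printf("%zu bytes for %u ranges\n", size, caps->nr_iovas);
	free(caps);
	return 0;
}
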
index b292887..a591d29 100644 (file)
@@ -52,6 +52,13 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
 
        get_page(page);
+
+       if (vmf->vma->vm_file)
+               page->mapping = vmf->vma->vm_file->f_mapping;
+       else
+               printk(KERN_ERR "no mapping available\n");
+
+       BUG_ON(!page->mapping);
        page->index = vmf->pgoff;
 
        vmf->page = page;
@@ -144,6 +151,17 @@ static const struct vm_operations_struct fb_deferred_io_vm_ops = {
        .page_mkwrite   = fb_deferred_io_mkwrite,
 };
 
+static int fb_deferred_io_set_page_dirty(struct page *page)
+{
+       if (!PageDirty(page))
+               SetPageDirty(page);
+       return 0;
+}
+
+static const struct address_space_operations fb_deferred_io_aops = {
+       .set_page_dirty = fb_deferred_io_set_page_dirty,
+};
+
 int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
        vma->vm_ops = &fb_deferred_io_vm_ops;
@@ -194,12 +212,29 @@ void fb_deferred_io_init(struct fb_info *info)
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_init);
 
+void fb_deferred_io_open(struct fb_info *info,
+                        struct inode *inode,
+                        struct file *file)
+{
+       file->f_mapping->a_ops = &fb_deferred_io_aops;
+}
+EXPORT_SYMBOL_GPL(fb_deferred_io_open);
+
 void fb_deferred_io_cleanup(struct fb_info *info)
 {
        struct fb_deferred_io *fbdefio = info->fbdefio;
+       struct page *page;
+       int i;
 
        BUG_ON(!fbdefio);
        cancel_delayed_work_sync(&info->deferred_work);
+
+       /* clear out the mapping that we set up */
+       for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
+               page = fb_deferred_io_page(info, i);
+               page->mapping = NULL;
+       }
+
        mutex_destroy(&fbdefio->lock);
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
index 072780b..98f1930 100644 (file)
@@ -1415,6 +1415,10 @@ __releases(&info->lock)
                if (res)
                        module_put(info->fbops->owner);
        }
+#ifdef CONFIG_FB_DEFERRED_IO
+       if (info->fbdefio)
+               fb_deferred_io_open(info, inode, file);
+#endif
 out:
        unlock_fb_info(info);
        if (res)
index cc8e62a..bd3d07a 100644 (file)
@@ -558,7 +558,7 @@ static int hgafb_probe(struct platform_device *pdev)
        int ret;
 
        ret = hga_card_detect();
-       if (!ret)
+       if (ret)
                return ret;
 
        printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n",
index 9fbe5a5..78719f2 100644 (file)
@@ -1919,7 +1919,9 @@ static void afs_rename_edit_dir(struct afs_operation *op)
        new_inode = d_inode(new_dentry);
        if (new_inode) {
                spin_lock(&new_inode->i_lock);
-               if (new_inode->i_nlink > 0)
+               if (S_ISDIR(new_inode->i_mode))
+                       clear_nlink(new_inode);
+               else if (new_inode->i_nlink > 0)
                        drop_nlink(new_inode);
                spin_unlock(&new_inode->i_lock);
        }
index b297525..179004b 100644 (file)
@@ -203,8 +203,8 @@ static int __init afs_init(void)
                goto error_fs;
 
        afs_proc_symlink = proc_symlink("fs/afs", NULL, "../self/net/afs");
-       if (IS_ERR(afs_proc_symlink)) {
-               ret = PTR_ERR(afs_proc_symlink);
+       if (!afs_proc_symlink) {
+               ret = -ENOMEM;
                goto error_proc;
        }
 
index 3edb620..e9ccaa3 100644 (file)
@@ -730,7 +730,7 @@ static int afs_writepages_region(struct address_space *mapping,
                        return ret;
                }
 
-               start += ret * PAGE_SIZE;
+               start += ret;
 
                cond_resched();
        } while (wbc->nr_to_write > 0);
@@ -837,6 +837,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
        struct inode *inode = file_inode(file);
        struct afs_vnode *vnode = AFS_FS_I(inode);
        unsigned long priv;
+       vm_fault_t ret = VM_FAULT_RETRY;
 
        _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);
 
@@ -848,14 +849,14 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
 #ifdef CONFIG_AFS_FSCACHE
        if (PageFsCache(page) &&
            wait_on_page_fscache_killable(page) < 0)
-               return VM_FAULT_RETRY;
+               goto out;
 #endif
 
        if (wait_on_page_writeback_killable(page))
-               return VM_FAULT_RETRY;
+               goto out;
 
        if (lock_page_killable(page) < 0)
-               return VM_FAULT_RETRY;
+               goto out;
 
        /* We mustn't change page->private until writeback is complete as that
         * details the portion of the page we need to write back and we might
@@ -863,7 +864,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
         */
        if (wait_on_page_writeback_killable(page) < 0) {
                unlock_page(page);
-               return VM_FAULT_RETRY;
+               goto out;
        }
 
        priv = afs_page_dirty(page, 0, thp_size(page));
@@ -877,8 +878,10 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
        }
        file_update_time(file);
 
+       ret = VM_FAULT_LOCKED;
+out:
        sb_end_pagefault(inode->i_sb);
-       return VM_FAULT_LOCKED;
+       return ret;
 }
 
 /*
index aa57bdc..6d5c4e4 100644 (file)
@@ -2442,16 +2442,16 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
        spin_lock(&sinfo->lock);
        spin_lock(&cache->lock);
        if (!--cache->ro) {
-               num_bytes = cache->length - cache->reserved -
-                           cache->pinned - cache->bytes_super -
-                           cache->zone_unusable - cache->used;
-               sinfo->bytes_readonly -= num_bytes;
                if (btrfs_is_zoned(cache->fs_info)) {
                        /* Migrate zone_unusable bytes back */
                        cache->zone_unusable = cache->alloc_offset - cache->used;
                        sinfo->bytes_zone_unusable += cache->zone_unusable;
                        sinfo->bytes_readonly -= cache->zone_unusable;
                }
+               num_bytes = cache->length - cache->reserved -
+                           cache->pinned - cache->bytes_super -
+                           cache->zone_unusable - cache->used;
+               sinfo->bytes_readonly -= num_bytes;
                list_del_init(&cache->ro_list);
        }
        spin_unlock(&cache->lock);
index d17ac30..1346d69 100644 (file)
@@ -457,7 +457,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
        bytes_left = compressed_len;
        for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
                int submit = 0;
-               int len;
+               int len = 0;
 
                page = compressed_pages[pg_index];
                page->mapping = inode->vfs_inode.i_mapping;
@@ -465,10 +465,17 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
                        submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
                                                          0);
 
-               if (pg_index == 0 && use_append)
-                       len = bio_add_zone_append_page(bio, page, PAGE_SIZE, 0);
-               else
-                       len = bio_add_page(bio, page, PAGE_SIZE, 0);
+               /*
+                * The page can only be added to the bio if the current bio
+                * fits in the stripe.
+                */
+               if (!submit) {
+                       if (pg_index == 0 && use_append)
+                               len = bio_add_zone_append_page(bio, page,
+                                                              PAGE_SIZE, 0);
+                       else
+                               len = bio_add_page(bio, page, PAGE_SIZE, 0);
+               }
 
                page->mapping = NULL;
                if (submit || len < PAGE_SIZE) {
index c9a3036..8d386a5 100644 (file)
@@ -2648,6 +2648,24 @@ static int validate_super(struct btrfs_fs_info *fs_info,
                ret = -EINVAL;
        }
 
+       if (memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
+                  BTRFS_FSID_SIZE)) {
+               btrfs_err(fs_info,
+               "superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
+                       fs_info->super_copy->fsid, fs_info->fs_devices->fsid);
+               ret = -EINVAL;
+       }
+
+       if (btrfs_fs_incompat(fs_info, METADATA_UUID) &&
+           memcmp(fs_info->fs_devices->metadata_uuid,
+                  fs_info->super_copy->metadata_uuid, BTRFS_FSID_SIZE)) {
+               btrfs_err(fs_info,
+"superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
+                       fs_info->super_copy->metadata_uuid,
+                       fs_info->fs_devices->metadata_uuid);
+               ret = -EINVAL;
+       }
+
        if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
                   BTRFS_FSID_SIZE) != 0) {
                btrfs_err(fs_info,
@@ -3279,14 +3297,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
 
        disk_super = fs_info->super_copy;
 
-       ASSERT(!memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
-                      BTRFS_FSID_SIZE));
-
-       if (btrfs_fs_incompat(fs_info, METADATA_UUID)) {
-               ASSERT(!memcmp(fs_info->fs_devices->metadata_uuid,
-                               fs_info->super_copy->metadata_uuid,
-                               BTRFS_FSID_SIZE));
-       }
 
        features = btrfs_super_flags(disk_super);
        if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
index f1d15b6..3d5c35e 100644 (file)
@@ -1868,7 +1868,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
        trace_run_delayed_ref_head(fs_info, head, 0);
        btrfs_delayed_ref_unlock(head);
        btrfs_put_delayed_ref_head(head);
-       return 0;
+       return ret;
 }
 
 static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
index 294602f..441cee7 100644 (file)
@@ -788,7 +788,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
        u64 end_byte = bytenr + len;
        u64 csum_end;
        struct extent_buffer *leaf;
-       int ret;
+       int ret = 0;
        const u32 csum_size = fs_info->csum_size;
        u32 blocksize_bits = fs_info->sectorsize_bits;
 
@@ -806,6 +806,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
 
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0) {
+                       ret = 0;
                        if (path->slots[0] == 0)
                                break;
                        path->slots[0]--;
@@ -862,7 +863,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
                        ret = btrfs_del_items(trans, root, path,
                                              path->slots[0], del_nr);
                        if (ret)
-                               goto out;
+                               break;
                        if (key.offset == bytenr)
                                break;
                } else if (key.offset < bytenr && csum_end > end_byte) {
@@ -906,8 +907,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
                        ret = btrfs_split_item(trans, root, path, &key, offset);
                        if (ret && ret != -EAGAIN) {
                                btrfs_abort_transaction(trans, ret);
-                               goto out;
+                               break;
                        }
+                       ret = 0;
 
                        key.offset = end_byte - 1;
                } else {
@@ -917,12 +919,41 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
                }
                btrfs_release_path(path);
        }
-       ret = 0;
-out:
        btrfs_free_path(path);
        return ret;
 }
 
+static int find_next_csum_offset(struct btrfs_root *root,
+                                struct btrfs_path *path,
+                                u64 *next_offset)
+{
+       const u32 nritems = btrfs_header_nritems(path->nodes[0]);
+       struct btrfs_key found_key;
+       int slot = path->slots[0] + 1;
+       int ret;
+
+       if (nritems == 0 || slot >= nritems) {
+               ret = btrfs_next_leaf(root, path);
+               if (ret < 0) {
+                       return ret;
+               } else if (ret > 0) {
+                       *next_offset = (u64)-1;
+                       return 0;
+               }
+               slot = path->slots[0];
+       }
+
+       btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
+
+       if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
+           found_key.type != BTRFS_EXTENT_CSUM_KEY)
+               *next_offset = (u64)-1;
+       else
+               *next_offset = found_key.offset;
+
+       return 0;
+}
+
 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_ordered_sum *sums)
@@ -938,7 +969,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
        u64 total_bytes = 0;
        u64 csum_offset;
        u64 bytenr;
-       u32 nritems;
        u32 ins_size;
        int index = 0;
        int found_next;
@@ -981,26 +1011,10 @@ again:
                        goto insert;
                }
        } else {
-               int slot = path->slots[0] + 1;
-               /* we didn't find a csum item, insert one */
-               nritems = btrfs_header_nritems(path->nodes[0]);
-               if (!nritems || (path->slots[0] >= nritems - 1)) {
-                       ret = btrfs_next_leaf(root, path);
-                       if (ret < 0) {
-                               goto out;
-                       } else if (ret > 0) {
-                               found_next = 1;
-                               goto insert;
-                       }
-                       slot = path->slots[0];
-               }
-               btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
-               if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
-                   found_key.type != BTRFS_EXTENT_CSUM_KEY) {
-                       found_next = 1;
-                       goto insert;
-               }
-               next_offset = found_key.offset;
+               /* We didn't find a csum item, insert one. */
+               ret = find_next_csum_offset(root, path, &next_offset);
+               if (ret < 0)
+                       goto out;
                found_next = 1;
                goto insert;
        }
@@ -1056,8 +1070,48 @@ extend_csum:
                tmp = sums->len - total_bytes;
                tmp >>= fs_info->sectorsize_bits;
                WARN_ON(tmp < 1);
+               extend_nr = max_t(int, 1, tmp);
+
+               /*
+                * A log tree can already have checksum items with a subset of
+                * the checksums we are trying to log. This can happen after
+                * doing a sequence of partial writes into prealloc extents and
+                * fsyncs in between, with a full fsync logging a larger subrange
+                * of an extent for which a previous fast fsync logged a smaller
+                * subrange. And this happens in particular due to merging file
+                * extent items when we complete an ordered extent for a range
+                * covered by a prealloc extent - this is done at
+                * btrfs_mark_extent_written().
+                *
+                * So if we try to extend the previous checksum item, which has
+                * a range that ends at the start of the range we want to insert,
+                * make sure we don't extend beyond the start offset of the next
+                * checksum item. If we are at the last item in the leaf, then
+                * forget the optimization of extending and add a new checksum
+                * item - it is not worth the complexity of releasing the path,
+                * getting the first key for the next leaf, repeat the btree
+                * search, etc, because log trees are temporary anyway and it
+                * would only save a few bytes of leaf space.
+                */
+               if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
+                       if (path->slots[0] + 1 >=
+                           btrfs_header_nritems(path->nodes[0])) {
+                               ret = find_next_csum_offset(root, path, &next_offset);
+                               if (ret < 0)
+                                       goto out;
+                               found_next = 1;
+                               goto insert;
+                       }
+
+                       ret = find_next_csum_offset(root, path, &next_offset);
+                       if (ret < 0)
+                               goto out;
+
+                       tmp = (next_offset - bytenr) >> fs_info->sectorsize_bits;
+                       if (tmp <= INT_MAX)
+                               extend_nr = min_t(int, extend_nr, tmp);
+               }
 
-               extend_nr = max_t(int, 1, (int)tmp);
                diff = (csum_offset + extend_nr) * csum_size;
                diff = min(diff,
                           MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);
index 3b10d98..55f6842 100644 (file)
@@ -1094,7 +1094,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
        int del_nr = 0;
        int del_slot = 0;
        int recow;
-       int ret;
+       int ret = 0;
        u64 ino = btrfs_ino(inode);
 
        path = btrfs_alloc_path();
@@ -1315,7 +1315,7 @@ again:
        }
 out:
        btrfs_free_path(path);
-       return 0;
+       return ret;
 }
 
 /*
index 33f1457..46f3929 100644 (file)
@@ -3000,6 +3000,18 @@ out:
        if (ret || truncated) {
                u64 unwritten_start = start;
 
+               /*
+                * If we failed to finish this ordered extent for any reason we
+                * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
+                * extent, and mark the inode with the error if it wasn't
+                * already set.  Any error during writeback would have already
+                * set the mapping error, so we need to set it if we're the ones
+                * marking this ordered extent as failed.
+                */
+               if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
+                                            &ordered_extent->flags))
+                       mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
+
                if (truncated)
                        unwritten_start += logical_len;
                clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
@@ -9076,6 +9088,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
        int ret2;
        bool root_log_pinned = false;
        bool dest_log_pinned = false;
+       bool need_abort = false;
 
        /* we only allow rename subvolume link between subvolumes */
        if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
@@ -9135,6 +9148,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
                                             old_idx);
                if (ret)
                        goto out_fail;
+               need_abort = true;
        }
 
        /* And now for the dest. */
@@ -9150,8 +9164,11 @@ static int btrfs_rename_exchange(struct inode *old_dir,
                                             new_ino,
                                             btrfs_ino(BTRFS_I(old_dir)),
                                             new_idx);
-               if (ret)
+               if (ret) {
+                       if (need_abort)
+                               btrfs_abort_transaction(trans, ret);
                        goto out_fail;
+               }
        }
 
        /* Update inode version and ctime/mtime. */
index d434dc7..9178da0 100644 (file)
@@ -203,10 +203,7 @@ static int clone_copy_inline_extent(struct inode *dst,
                         * inline extent's data to the page.
                         */
                        ASSERT(key.offset > 0);
-                       ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
-                                                 inline_data, size, datal,
-                                                 comp_type);
-                       goto out;
+                       goto copy_to_page;
                }
        } else if (i_size_read(dst) <= datal) {
                struct btrfs_file_extent_item *ei;
@@ -222,13 +219,10 @@ static int clone_copy_inline_extent(struct inode *dst,
                    BTRFS_FILE_EXTENT_INLINE)
                        goto copy_inline_extent;
 
-               ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
-                                         inline_data, size, datal, comp_type);
-               goto out;
+               goto copy_to_page;
        }
 
 copy_inline_extent:
-       ret = 0;
        /*
         * We have no extent items, or we have an extent at offset 0 which may
         * or may not be inlined. All these cases are dealt the same way.
@@ -240,11 +234,13 @@ copy_inline_extent:
                 * clone. Deal with all these cases by copying the inline extent
                 * data into the respective page at the destination inode.
                 */
-               ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
-                                         inline_data, size, datal, comp_type);
-               goto out;
+               goto copy_to_page;
        }
 
+       /*
+        * Release path before starting a new transaction so we don't hold locks
+        * that would confuse lockdep.
+        */
        btrfs_release_path(path);
        /*
  * If we end up here it means we are copying the inline extent into a leaf
@@ -282,11 +278,6 @@ copy_inline_extent:
 out:
        if (!ret && !trans) {
                /*
-                * Release path before starting a new transaction so we don't
-                * hold locks that would confuse lockdep.
-                */
-               btrfs_release_path(path);
-               /*
                 * No transaction here means we copied the inline extent into a
                 * page of the destination inode.
                 *
@@ -306,6 +297,21 @@ out:
                *trans_out = trans;
 
        return ret;
+
+copy_to_page:
+       /*
+        * Release our path because we don't need it anymore and also because
+        * copy_inline_to_page() needs to reserve data and metadata, which may
+        * need to flush delalloc when we are low on available space and
+        * therefore cause a deadlock if writeback of an inline extent needs to
+        * write to the same leaf or an ordered extent completion needs to write
+        * to the same leaf.
+        */
+       btrfs_release_path(path);
+
+       ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
+                                 inline_data, size, datal, comp_type);
+       goto out;
 }
 
 /**
index 326be57..dbcf8bb 100644 (file)
@@ -1574,7 +1574,9 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
                        if (ret)
                                goto out;
 
-                       btrfs_update_inode(trans, root, BTRFS_I(inode));
+                       ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+                       if (ret)
+                               goto out;
                }
 
                ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
@@ -1749,7 +1751,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
 
        if (nlink != inode->i_nlink) {
                set_nlink(inode, nlink);
-               btrfs_update_inode(trans, root, BTRFS_I(inode));
+               ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+               if (ret)
+                       goto out;
        }
        BTRFS_I(inode)->index_cnt = (u64)-1;
 
@@ -1787,6 +1791,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
                        break;
 
                if (ret == 1) {
+                       ret = 0;
                        if (path->slots[0] == 0)
                                break;
                        path->slots[0]--;
@@ -1799,17 +1804,19 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
 
                ret = btrfs_del_item(trans, root, path);
                if (ret)
-                       goto out;
+                       break;
 
                btrfs_release_path(path);
                inode = read_one_inode(root, key.offset);
-               if (!inode)
-                       return -EIO;
+               if (!inode) {
+                       ret = -EIO;
+                       break;
+               }
 
                ret = fixup_inode_link_count(trans, root, inode);
                iput(inode);
                if (ret)
-                       goto out;
+                       break;
 
                /*
                 * fixup on a directory may create new entries,
@@ -1818,8 +1825,6 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
                 */
                key.offset = (u64)-1;
        }
-       ret = 0;
-out:
        btrfs_release_path(path);
        return ret;
 }
@@ -3297,6 +3302,22 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
         *    begins and releases it only after writing its superblock.
         */
        mutex_lock(&fs_info->tree_log_mutex);
+
+       /*
+        * The previous transaction writeout phase could have failed, and thus
+        * marked the fs in an error state.  We must not commit here, as we
+        * could have updated our generation in the super_for_commit and
+        * writing the super here would result in transid mismatches.  If there
+        * is an error here just bail.
+        */
+       if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
+               ret = -EIO;
+               btrfs_set_log_full_commit(trans);
+               btrfs_abort_transaction(trans, ret);
+               mutex_unlock(&fs_info->tree_log_mutex);
+               goto out_wake_log_root;
+       }
+
        btrfs_set_super_log_root(fs_info->super_for_commit, log_root_start);
        btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level);
        ret = write_all_supers(fs_info, 1);
index 1bb8ee9..f1f3b10 100644 (file)
@@ -150,6 +150,18 @@ static inline u32 sb_zone_number(int shift, int mirror)
        return (u32)zone;
 }
 
+static inline sector_t zone_start_sector(u32 zone_number,
+                                        struct block_device *bdev)
+{
+       return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
+}
+
+static inline u64 zone_start_physical(u32 zone_number,
+                                     struct btrfs_zoned_device_info *zone_info)
+{
+       return (u64)zone_number << zone_info->zone_size_shift;
+}
+
 /*
  * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
  * device into static sized chunks and fake a conventional zone on each of
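
Editor's note, a standalone sketch of the shift arithmetic the two helpers above wrap (the 256 MiB zone size is an illustrative value): with power-of-two zone sizes, a zone number converts to its start offset by a left shift, in bytes or in 512-byte sectors.

#include <stdio.h>

int main(void)
{
	unsigned int zone_size_shift = 28;	/* ilog2(256 MiB) */
	unsigned int zone = 5;
	unsigned long long byte = (unsigned long long)zone << zone_size_shift;
	unsigned long long sector = byte >> 9;	/* 512-byte sectors */

	printf("zone %u starts at byte %llu, sector %llu\n",
	       zone, byte, sector);
	return 0;
}
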
@@ -405,8 +417,8 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
                if (sb_zone + 1 >= zone_info->nr_zones)
                        continue;
 
-               sector = sb_zone << (zone_info->zone_size_shift - SECTOR_SHIFT);
-               ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT,
+               ret = btrfs_get_dev_zones(device,
+                                         zone_start_physical(sb_zone, zone_info),
                                          &zone_info->sb_zones[sb_pos],
                                          &nr_zones);
                if (ret)
@@ -721,7 +733,7 @@ int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
        if (sb_zone + 1 >= nr_zones)
                return -ENOENT;
 
-       ret = blkdev_report_zones(bdev, sb_zone << zone_sectors_shift,
+       ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
                                  BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
                                  zones);
        if (ret < 0)
@@ -826,7 +838,7 @@ int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
                return -ENOENT;
 
        return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
-                               sb_zone << zone_sectors_shift,
+                               zone_start_sector(sb_zone, bdev),
                                zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
 }
 
@@ -878,7 +890,8 @@ u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
                        if (!(end <= sb_zone ||
                              sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
                                have_sb = true;
-                               pos = ((u64)sb_zone + BTRFS_NR_SB_LOG_ZONES) << shift;
+                               pos = zone_start_physical(
+                                       sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
                                break;
                        }
 
index 4a97fe1..37fc7d6 100644 (file)
@@ -72,15 +72,28 @@ struct smb3_key_debug_info {
 } __packed;
 
 /*
- * Dump full key (32 byte encrypt/decrypt keys instead of 16 bytes)
- * is needed if GCM256 (stronger encryption) negotiated
+ * Dump variable-sized keys
  */
 struct smb3_full_key_debug_info {
-       __u64   Suid;
+       /* INPUT: size of userspace buffer */
+       __u32   in_size;
+
+       /*
+        * INPUT: 0 for current user, otherwise session to dump
+        * OUTPUT: session id that was dumped
+        */
+       __u64   session_id;
        __u16   cipher_type;
-       __u8    auth_key[16]; /* SMB2_NTLMV2_SESSKEY_SIZE */
-       __u8    smb3encryptionkey[32]; /* SMB3_ENC_DEC_KEY_SIZE */
-       __u8    smb3decryptionkey[32]; /* SMB3_ENC_DEC_KEY_SIZE */
+       __u8    session_key_length;
+       __u8    server_in_key_length;
+       __u8    server_out_key_length;
+       __u8    data[];
+       /*
+        * return this struct with the keys appended at the end:
+        * __u8 session_key[session_key_length];
+        * __u8 server_in_key[server_in_key_length];
+        * __u8 server_out_key[server_out_key_length];
+        */
 } __packed;
 
 struct smb3_notify {
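
Editor's note, a loose userspace mirror of the variable-length layout introduced above (the kernel struct is __packed, so field offsets differ slightly; sizes here are illustrative): the three keys are appended back to back after the fixed header, each sized by its length field.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct full_key_info {
	uint32_t in_size;
	uint64_t session_id;
	uint16_t cipher_type;
	uint8_t  session_key_length;
	uint8_t  server_in_key_length;
	uint8_t  server_out_key_length;
	uint8_t  data[];
};

int main(void)
{
	uint8_t skl = 16, inl = 32, outl = 32;	/* e.g. AES-256-GCM sizes */
	size_t size = sizeof(struct full_key_info) + skl + inl + outl;
	struct full_key_info *out = calloc(1, size);
	uint8_t *p;

	if (!out)
		return 1;
	out->in_size = (uint32_t)size;
	out->session_key_length = skl;
	out->server_in_key_length = inl;
	out->server_out_key_length = outl;

	p = out->data;			/* keys sit back to back */
	memset(p, 0xAA, skl); p += skl;	/* session key */
	memset(p, 0xBB, inl); p += inl;	/* server-in key */
	memset(p, 0xCC, outl);		/* server-out key */

	printf("buffer needs %zu bytes\n", size);
	free(out);
	return 0;
}
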
index b53a87d..554d64f 100644 (file)
 #define SMB3_SIGN_KEY_SIZE (16)
 
 /*
- * Size of the smb3 encryption/decryption keys
+ * Size of the smb3 encryption/decryption key storage.
+ * This size is large enough to store any of the cipher key types.
  */
 #define SMB3_ENC_DEC_KEY_SIZE (32)
 
index 28ec8d7..d67d281 100644 (file)
@@ -33,6 +33,7 @@
 #include "cifsfs.h"
 #include "cifs_ioctl.h"
 #include "smb2proto.h"
+#include "smb2glob.h"
 #include <linux/btrfs.h>
 
 static long cifs_ioctl_query_info(unsigned int xid, struct file *filep,
@@ -214,48 +215,112 @@ static int cifs_shutdown(struct super_block *sb, unsigned long arg)
        return 0;
 }
 
-static int cifs_dump_full_key(struct cifs_tcon *tcon, unsigned long arg)
+static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug_info __user *in)
 {
-       struct smb3_full_key_debug_info pfull_key_inf;
-       __u64 suid;
-       struct list_head *tmp;
+       struct smb3_full_key_debug_info out;
        struct cifs_ses *ses;
+       int rc = 0;
        bool found = false;
+       u8 __user *end;
 
-       if (!smb3_encryption_required(tcon))
-               return -EOPNOTSUPP;
+       if (!smb3_encryption_required(tcon)) {
+               rc = -EOPNOTSUPP;
+               goto out;
+       }
+
+       /* copy user input into our output buffer */
+       if (copy_from_user(&out, in, sizeof(out))) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       if (!out.session_id) {
+               /* if ses id is 0, use current user session */
+               ses = tcon->ses;
+       } else {
+               /* otherwise if a session id is given, look for it in all our sessions */
+               struct cifs_ses *ses_it = NULL;
+               struct TCP_Server_Info *server_it = NULL;
 
-       ses = tcon->ses; /* default to user id for current user */
-       if (get_user(suid, (__u64 __user *)arg))
-               suid = 0;
-       if (suid) {
-               /* search to see if there is a session with a matching SMB UID */
                spin_lock(&cifs_tcp_ses_lock);
-               list_for_each(tmp, &tcon->ses->server->smb_ses_list) {
-                       ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
-                       if (ses->Suid == suid) {
-                               found = true;
-                               break;
+               list_for_each_entry(server_it, &cifs_tcp_ses_list, tcp_ses_list) {
+                       list_for_each_entry(ses_it, &server_it->smb_ses_list, smb_ses_list) {
+                               if (ses_it->Suid == out.session_id) {
+                                       ses = ses_it;
+                                        * Since we use the session outside the critical
+                                        * section, take a reference so that it cannot be
+                                        * released from under us.
+                                        * so increment its refcount
+                                        */
+                                       ses->ses_count++;
+                                       found = true;
+                                       goto search_end;
+                               }
                        }
                }
+search_end:
                spin_unlock(&cifs_tcp_ses_lock);
-               if (found == false)
-                       return -EINVAL;
-       } /* else uses default user's SMB UID (ie current user) */
-
-       pfull_key_inf.cipher_type = le16_to_cpu(ses->server->cipher_type);
-       pfull_key_inf.Suid = ses->Suid;
-       memcpy(pfull_key_inf.auth_key, ses->auth_key.response,
-              16 /* SMB2_NTLMV2_SESSKEY_SIZE */);
-       memcpy(pfull_key_inf.smb3decryptionkey, ses->smb3decryptionkey,
-              32 /* SMB3_ENC_DEC_KEY_SIZE */);
-       memcpy(pfull_key_inf.smb3encryptionkey,
-              ses->smb3encryptionkey, 32 /* SMB3_ENC_DEC_KEY_SIZE */);
-       if (copy_to_user((void __user *)arg, &pfull_key_inf,
-                        sizeof(struct smb3_full_key_debug_info)))
-               return -EFAULT;
+               if (!found) {
+                       rc = -ENOENT;
+                       goto out;
+               }
+       }
 
-       return 0;
+       switch (ses->server->cipher_type) {
+       case SMB2_ENCRYPTION_AES128_CCM:
+       case SMB2_ENCRYPTION_AES128_GCM:
+               out.session_key_length = CIFS_SESS_KEY_SIZE;
+               out.server_in_key_length = out.server_out_key_length = SMB3_GCM128_CRYPTKEY_SIZE;
+               break;
+       case SMB2_ENCRYPTION_AES256_CCM:
+       case SMB2_ENCRYPTION_AES256_GCM:
+               out.session_key_length = CIFS_SESS_KEY_SIZE;
+               out.server_in_key_length = out.server_out_key_length = SMB3_GCM256_CRYPTKEY_SIZE;
+               break;
+       default:
+               rc = -EOPNOTSUPP;
+               goto out;
+       }
+
+       /* check if user buffer is big enough to store all the keys */
+       if (out.in_size < sizeof(out) + out.session_key_length + out.server_in_key_length
+           + out.server_out_key_length) {
+               rc = -ENOBUFS;
+               goto out;
+       }
+
+       out.session_id = ses->Suid;
+       out.cipher_type = le16_to_cpu(ses->server->cipher_type);
+
+       /* overwrite user input with our output */
+       if (copy_to_user(in, &out, sizeof(out))) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       /* append all the keys at the end of the user buffer */
+       end = in->data;
+       if (copy_to_user(end, ses->auth_key.response, out.session_key_length)) {
+               rc = -EINVAL;
+               goto out;
+       }
+       end += out.session_key_length;
+
+       if (copy_to_user(end, ses->smb3encryptionkey, out.server_in_key_length)) {
+               rc = -EINVAL;
+               goto out;
+       }
+       end += out.server_in_key_length;
+
+       if (copy_to_user(end, ses->smb3decryptionkey, out.server_out_key_length)) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+out:
+       if (found)
+               cifs_put_smb_ses(ses);
+       return rc;
 }
 
 long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
@@ -371,6 +436,10 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                                rc = -EOPNOTSUPP;
                        break;
                case CIFS_DUMP_KEY:
+                       /*
+                        * Dump encryption keys. This is an old ioctl that only
+                        * handles AES-128-{CCM,GCM}.
+                        */
                        if (pSMBFile == NULL)
                                break;
                        if (!capable(CAP_SYS_ADMIN)) {
@@ -398,11 +467,10 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                        else
                                rc = 0;
                        break;
-               /*
-                * Dump full key (32 bytes instead of 16 bytes) is
-                * needed if GCM256 (stronger encryption) negotiated
-                */
                case CIFS_DUMP_FULL_KEY:
+                       /*
+                        * Dump encryption keys (handles any key size)
+                        */
                        if (pSMBFile == NULL)
                                break;
                        if (!capable(CAP_SYS_ADMIN)) {
@@ -410,8 +478,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                                break;
                        }
                        tcon = tlink_tcon(pSMBFile->tlink);
-                       rc = cifs_dump_full_key(tcon, arg);
-
+                       rc = cifs_dump_full_key(tcon, (void __user *)arg);
                        break;
                case CIFS_IOC_NOTIFY:
                        if (!S_ISDIR(inode->i_mode)) {
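
For reference, a minimal userspace sketch of the calling convention the
rewritten cifs_dump_full_key() above implements: the caller sets in_size to
the number of bytes available at the pointer, the kernel fills in the fixed
header, then appends the session, server-in and server-out keys back to back
after it. The struct layout and request code below are assumptions taken
from fs/cifs/cifs_ioctl.h as of this series, not an authoritative ABI
description.

    /* Hypothetical standalone helper; layout and ioctl number assumed
     * from fs/cifs/cifs_ioctl.h (CIFS_IOCTL_MAGIC 0xCF, sequence 10). */
    #include <stdio.h>
    #include <stdlib.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>

    struct smb3_full_key_debug_info {
            __u32 in_size;               /* IN: bytes available here */
            __u64 session_id;            /* IN: 0 = current session  */
            __u16 cipher_type;           /* OUT */
            __u8  session_key_length;    /* OUT */
            __u8  server_in_key_length;  /* OUT */
            __u8  server_out_key_length; /* OUT */
            __u8  data[];                /* OUT: keys, back to back  */
    };

    #define CIFS_IOCTL_MAGIC   0xCF
    #define CIFS_DUMP_FULL_KEY \
            _IOWR(CIFS_IOCTL_MAGIC, 10, struct smb3_full_key_debug_info)

    int main(int argc, char **argv)
    {
            /* room for the header plus up to three 32-byte keys */
            size_t sz = sizeof(struct smb3_full_key_debug_info) + 3 * 32;
            struct smb3_full_key_debug_info *info = calloc(1, sz);
            int fd;

            if (argc < 2 || !info)
                    return 1;
            fd = open(argv[1], O_RDONLY); /* any file on the cifs mount */
            if (fd < 0)
                    return 1;
            info->in_size = sz;
            info->session_id = 0;         /* dump the current session  */
            if (ioctl(fd, CIFS_DUMP_FULL_KEY, info) < 0) {
                    perror("CIFS_DUMP_FULL_KEY");
                    return 1;
            }
            printf("cipher 0x%x, session key %u bytes\n",
                   info->cipher_type, info->session_key_length);
            return 0;
    }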
index 9f24eb8..c205f93 100644 (file)
@@ -958,6 +958,13 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
        /* Internal types */
        server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
 
+       /*
+        * SMB 3.0 supports only one cipher and doesn't have an encryption
+        * negotiate context. Set the cipher type manually.
+        */
+       if (server->dialect == SMB30_PROT_ID && (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
+               server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;
+
        security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
                                               (struct smb2_sync_hdr *)rsp);
        /*
index d6df908..dafcb6a 100644 (file)
 
 #include <linux/tracepoint.h>
 
+/*
+ * Please use this 3-part article as a reference for writing new tracepoints:
+ * https://lwn.net/Articles/379903/
+ */
+
 /* For logging errors in read or write */
 DECLARE_EVENT_CLASS(smb3_rw_err_class,
        TP_PROTO(unsigned int xid,
@@ -529,16 +534,16 @@ DECLARE_EVENT_CLASS(smb3_exit_err_class,
        TP_ARGS(xid, func_name, rc),
        TP_STRUCT__entry(
                __field(unsigned int, xid)
-               __field(const char *, func_name)
+               __string(func_name, func_name)
                __field(int, rc)
        ),
        TP_fast_assign(
                __entry->xid = xid;
-               __entry->func_name = func_name;
+               __assign_str(func_name, func_name);
                __entry->rc = rc;
        ),
        TP_printk("\t%s: xid=%u rc=%d",
-               __entry->func_name, __entry->xid, __entry->rc)
+               __get_str(func_name), __entry->xid, __entry->rc)
 )
 
 #define DEFINE_SMB3_EXIT_ERR_EVENT(name)          \
@@ -583,14 +588,14 @@ DECLARE_EVENT_CLASS(smb3_enter_exit_class,
        TP_ARGS(xid, func_name),
        TP_STRUCT__entry(
                __field(unsigned int, xid)
-               __field(const char *, func_name)
+               __string(func_name, func_name)
        ),
        TP_fast_assign(
                __entry->xid = xid;
-               __entry->func_name = func_name;
+               __assign_str(func_name, func_name);
        ),
        TP_printk("\t%s: xid=%u",
-               __entry->func_name, __entry->xid)
+               __get_str(func_name), __entry->xid)
 )
 
 #define DEFINE_SMB3_ENTER_EXIT_EVENT(name)        \
@@ -857,16 +862,16 @@ DECLARE_EVENT_CLASS(smb3_reconnect_class,
        TP_STRUCT__entry(
                __field(__u64, currmid)
                __field(__u64, conn_id)
-               __field(char *, hostname)
+               __string(hostname, hostname)
        ),
        TP_fast_assign(
                __entry->currmid = currmid;
                __entry->conn_id = conn_id;
-               __entry->hostname = hostname;
+               __assign_str(hostname, hostname);
        ),
        TP_printk("conn_id=0x%llx server=%s current_mid=%llu",
                __entry->conn_id,
-               __entry->hostname,
+               __get_str(hostname),
                __entry->currmid)
 )
 
@@ -891,7 +896,7 @@ DECLARE_EVENT_CLASS(smb3_credit_class,
        TP_STRUCT__entry(
                __field(__u64, currmid)
                __field(__u64, conn_id)
-               __field(char *, hostname)
+               __string(hostname, hostname)
                __field(int, credits)
                __field(int, credits_to_add)
                __field(int, in_flight)
@@ -899,7 +904,7 @@ DECLARE_EVENT_CLASS(smb3_credit_class,
        TP_fast_assign(
                __entry->currmid = currmid;
                __entry->conn_id = conn_id;
-               __entry->hostname = hostname;
+               __assign_str(hostname, hostname);
                __entry->credits = credits;
                __entry->credits_to_add = credits_to_add;
                __entry->in_flight = in_flight;
@@ -907,7 +912,7 @@ DECLARE_EVENT_CLASS(smb3_credit_class,
        TP_printk("conn_id=0x%llx server=%s current_mid=%llu "
                        "credits=%d credit_change=%d in_flight=%d",
                __entry->conn_id,
-               __entry->hostname,
+               __get_str(hostname),
                __entry->currmid,
                __entry->credits,
                __entry->credits_to_add,
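
The four tracepoint hunks above all apply the same fix: a bare
__field(char *, ...) records only the pointer value, which may reference
freed memory by the time the trace buffer is read, so the string has to be
copied into the ring buffer with the __string()/__assign_str()/__get_str()
helpers instead. A minimal sketch of the pattern with made-up names (it
would live inside a trace header with the usual TRACE_SYSTEM boilerplate):

    /* Illustrative event class only; the names are hypothetical. */
    DECLARE_EVENT_CLASS(example_str_class,
            TP_PROTO(unsigned int xid, const char *name),
            TP_ARGS(xid, name),
            TP_STRUCT__entry(
                    __field(unsigned int, xid)
                    __string(name, name)      /* reserves strlen(name) + 1 */
            ),
            TP_fast_assign(
                    __entry->xid = xid;
                    __assign_str(name, name); /* copies at trace time */
            ),
            TP_printk("%s: xid=%u", __get_str(name), __entry->xid)
    )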
index 2868e3e..c3d8fc1 100644 (file)
@@ -519,7 +519,7 @@ static bool dump_interrupted(void)
         * but then we need to teach dump_write() to restart and clear
         * TIF_SIGPENDING.
         */
-       return signal_pending(current);
+       return fatal_signal_pending(current) || freezing(current);
 }
 
 static void wait_for_dump_helpers(struct file *file)
index e813acf..ba7c01c 100644 (file)
@@ -893,7 +893,7 @@ ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
 
        copy[copy_len] = '\n';
 
-       ret = simple_read_from_buffer(user_buf, count, ppos, copy, copy_len);
+       ret = simple_read_from_buffer(user_buf, count, ppos, copy, len);
        kfree(copy);
 
        return ret;
index 1d25216..8129a43 100644 (file)
@@ -45,10 +45,13 @@ static unsigned int debugfs_allow __ro_after_init = DEFAULT_DEBUGFS_ALLOW_BITS;
 static int debugfs_setattr(struct user_namespace *mnt_userns,
                           struct dentry *dentry, struct iattr *ia)
 {
-       int ret = security_locked_down(LOCKDOWN_DEBUGFS);
+       int ret;
 
-       if (ret && (ia->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)))
-               return ret;
+       if (ia->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)) {
+               ret = security_locked_down(LOCKDOWN_DEBUGFS);
+               if (ret)
+                       return ret;
+       }
        return simple_setattr(&init_user_ns, dentry, ia);
 }
 
index 77c84d6..cbf37b2 100644 (file)
@@ -3206,7 +3206,10 @@ static int ext4_split_extent_at(handle_t *handle,
                ext4_ext_mark_unwritten(ex2);
 
        err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
-       if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
+       if (err != -ENOSPC && err != -EDQUOT)
+               goto out;
+
+       if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
                if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
                        if (split_flag & EXT4_EXT_DATA_VALID1) {
                                err = ext4_ext_zeroout(inode, ex2);
@@ -3232,25 +3235,22 @@ static int ext4_split_extent_at(handle_t *handle,
                                              ext4_ext_pblock(&orig_ex));
                }
 
-               if (err)
-                       goto fix_extent_len;
-               /* update the extent length and mark as initialized */
-               ex->ee_len = cpu_to_le16(ee_len);
-               ext4_ext_try_to_merge(handle, inode, path, ex);
-               err = ext4_ext_dirty(handle, inode, path + path->p_depth);
-               if (err)
-                       goto fix_extent_len;
-
-               /* update extent status tree */
-               err = ext4_zeroout_es(inode, &zero_ex);
-
-               goto out;
-       } else if (err)
-               goto fix_extent_len;
-
-out:
-       ext4_ext_show_leaf(inode, path);
-       return err;
+               if (!err) {
+                       /* update the extent length and mark as initialized */
+                       ex->ee_len = cpu_to_le16(ee_len);
+                       ext4_ext_try_to_merge(handle, inode, path, ex);
+                       err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+                       if (!err)
+                               /* update extent status tree */
+                               err = ext4_zeroout_es(inode, &zero_ex);
+                       /* If we failed at this point, we don't know exactly
+                        * what state the extent tree is in, so don't try to
+                        * fix the length of the original extent as it may do
+                        * even more damage.
+                        */
+                       goto out;
+               }
+       }
 
 fix_extent_len:
        ex->ee_len = orig_ex.ee_len;
@@ -3260,6 +3260,9 @@ fix_extent_len:
         */
        ext4_ext_dirty(handle, inode, path + path->p_depth);
        return err;
+out:
+       ext4_ext_show_leaf(inode, path);
+       return err;
 }
 
 /*
index f98ca4f..e819522 100644 (file)
@@ -1288,28 +1288,29 @@ struct dentry_info_args {
 };
 
 static inline void tl_to_darg(struct dentry_info_args *darg,
-                               struct  ext4_fc_tl *tl)
+                             struct  ext4_fc_tl *tl, u8 *val)
 {
-       struct ext4_fc_dentry_info *fcd;
+       struct ext4_fc_dentry_info fcd;
 
-       fcd = (struct ext4_fc_dentry_info *)ext4_fc_tag_val(tl);
+       memcpy(&fcd, val, sizeof(fcd));
 
-       darg->parent_ino = le32_to_cpu(fcd->fc_parent_ino);
-       darg->ino = le32_to_cpu(fcd->fc_ino);
-       darg->dname = fcd->fc_dname;
-       darg->dname_len = ext4_fc_tag_len(tl) -
-                       sizeof(struct ext4_fc_dentry_info);
+       darg->parent_ino = le32_to_cpu(fcd.fc_parent_ino);
+       darg->ino = le32_to_cpu(fcd.fc_ino);
+       darg->dname = val + offsetof(struct ext4_fc_dentry_info, fc_dname);
+       darg->dname_len = le16_to_cpu(tl->fc_len) -
+               sizeof(struct ext4_fc_dentry_info);
 }
 
 /* Unlink replay function */
-static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl)
+static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl,
+                                u8 *val)
 {
        struct inode *inode, *old_parent;
        struct qstr entry;
        struct dentry_info_args darg;
        int ret = 0;
 
-       tl_to_darg(&darg, tl);
+       tl_to_darg(&darg, tl, val);
 
        trace_ext4_fc_replay(sb, EXT4_FC_TAG_UNLINK, darg.ino,
                        darg.parent_ino, darg.dname_len);
@@ -1399,13 +1400,14 @@ out:
 }
 
 /* Link replay function */
-static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl)
+static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl,
+                              u8 *val)
 {
        struct inode *inode;
        struct dentry_info_args darg;
        int ret = 0;
 
-       tl_to_darg(&darg, tl);
+       tl_to_darg(&darg, tl, val);
        trace_ext4_fc_replay(sb, EXT4_FC_TAG_LINK, darg.ino,
                        darg.parent_ino, darg.dname_len);
 
@@ -1450,9 +1452,10 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino)
 /*
  * Inode replay function
  */
-static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
+static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl,
+                               u8 *val)
 {
-       struct ext4_fc_inode *fc_inode;
+       struct ext4_fc_inode fc_inode;
        struct ext4_inode *raw_inode;
        struct ext4_inode *raw_fc_inode;
        struct inode *inode = NULL;
@@ -1460,9 +1463,9 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
        int inode_len, ino, ret, tag = le16_to_cpu(tl->fc_tag);
        struct ext4_extent_header *eh;
 
-       fc_inode = (struct ext4_fc_inode *)ext4_fc_tag_val(tl);
+       memcpy(&fc_inode, val, sizeof(fc_inode));
 
-       ino = le32_to_cpu(fc_inode->fc_ino);
+       ino = le32_to_cpu(fc_inode.fc_ino);
        trace_ext4_fc_replay(sb, tag, ino, 0, 0);
 
        inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
@@ -1474,12 +1477,13 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
 
        ext4_fc_record_modified_inode(sb, ino);
 
-       raw_fc_inode = (struct ext4_inode *)fc_inode->fc_raw_inode;
+       raw_fc_inode = (struct ext4_inode *)
+               (val + offsetof(struct ext4_fc_inode, fc_raw_inode));
        ret = ext4_get_fc_inode_loc(sb, ino, &iloc);
        if (ret)
                goto out;
 
-       inode_len = ext4_fc_tag_len(tl) - sizeof(struct ext4_fc_inode);
+       inode_len = le16_to_cpu(tl->fc_len) - sizeof(struct ext4_fc_inode);
        raw_inode = ext4_raw_inode(&iloc);
 
        memcpy(raw_inode, raw_fc_inode, offsetof(struct ext4_inode, i_block));
@@ -1547,14 +1551,15 @@ out:
  * inode for which we are trying to create a dentry here, should already have
  * been replayed before we start here.
  */
-static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl)
+static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl,
+                                u8 *val)
 {
        int ret = 0;
        struct inode *inode = NULL;
        struct inode *dir = NULL;
        struct dentry_info_args darg;
 
-       tl_to_darg(&darg, tl);
+       tl_to_darg(&darg, tl, val);
 
        trace_ext4_fc_replay(sb, EXT4_FC_TAG_CREAT, darg.ino,
                        darg.parent_ino, darg.dname_len);
@@ -1633,9 +1638,9 @@ static int ext4_fc_record_regions(struct super_block *sb, int ino,
 
 /* Replay add range tag */
 static int ext4_fc_replay_add_range(struct super_block *sb,
-                               struct ext4_fc_tl *tl)
+                                   struct ext4_fc_tl *tl, u8 *val)
 {
-       struct ext4_fc_add_range *fc_add_ex;
+       struct ext4_fc_add_range fc_add_ex;
        struct ext4_extent newex, *ex;
        struct inode *inode;
        ext4_lblk_t start, cur;
@@ -1645,15 +1650,14 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
        struct ext4_ext_path *path = NULL;
        int ret;
 
-       fc_add_ex = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl);
-       ex = (struct ext4_extent *)&fc_add_ex->fc_ex;
+       memcpy(&fc_add_ex, val, sizeof(fc_add_ex));
+       ex = (struct ext4_extent *)&fc_add_ex.fc_ex;
 
        trace_ext4_fc_replay(sb, EXT4_FC_TAG_ADD_RANGE,
-               le32_to_cpu(fc_add_ex->fc_ino), le32_to_cpu(ex->ee_block),
+               le32_to_cpu(fc_add_ex.fc_ino), le32_to_cpu(ex->ee_block),
                ext4_ext_get_actual_len(ex));
 
-       inode = ext4_iget(sb, le32_to_cpu(fc_add_ex->fc_ino),
-                               EXT4_IGET_NORMAL);
+       inode = ext4_iget(sb, le32_to_cpu(fc_add_ex.fc_ino), EXT4_IGET_NORMAL);
        if (IS_ERR(inode)) {
                jbd_debug(1, "Inode not found.");
                return 0;
@@ -1762,32 +1766,33 @@ next:
 
 /* Replay DEL_RANGE tag */
 static int
-ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl)
+ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
+                        u8 *val)
 {
        struct inode *inode;
-       struct ext4_fc_del_range *lrange;
+       struct ext4_fc_del_range lrange;
        struct ext4_map_blocks map;
        ext4_lblk_t cur, remaining;
        int ret;
 
-       lrange = (struct ext4_fc_del_range *)ext4_fc_tag_val(tl);
-       cur = le32_to_cpu(lrange->fc_lblk);
-       remaining = le32_to_cpu(lrange->fc_len);
+       memcpy(&lrange, val, sizeof(lrange));
+       cur = le32_to_cpu(lrange.fc_lblk);
+       remaining = le32_to_cpu(lrange.fc_len);
 
        trace_ext4_fc_replay(sb, EXT4_FC_TAG_DEL_RANGE,
-               le32_to_cpu(lrange->fc_ino), cur, remaining);
+               le32_to_cpu(lrange.fc_ino), cur, remaining);
 
-       inode = ext4_iget(sb, le32_to_cpu(lrange->fc_ino), EXT4_IGET_NORMAL);
+       inode = ext4_iget(sb, le32_to_cpu(lrange.fc_ino), EXT4_IGET_NORMAL);
        if (IS_ERR(inode)) {
-               jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange->fc_ino));
+               jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange.fc_ino));
                return 0;
        }
 
        ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
 
        jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n",
-                       inode->i_ino, le32_to_cpu(lrange->fc_lblk),
-                       le32_to_cpu(lrange->fc_len));
+                       inode->i_ino, le32_to_cpu(lrange.fc_lblk),
+                       le32_to_cpu(lrange.fc_len));
        while (remaining > 0) {
                map.m_lblk = cur;
                map.m_len = remaining;
@@ -1808,8 +1813,8 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl)
        }
 
        ret = ext4_punch_hole(inode,
-               le32_to_cpu(lrange->fc_lblk) << sb->s_blocksize_bits,
-               le32_to_cpu(lrange->fc_len) <<  sb->s_blocksize_bits);
+               le32_to_cpu(lrange.fc_lblk) << sb->s_blocksize_bits,
+               le32_to_cpu(lrange.fc_len) <<  sb->s_blocksize_bits);
        if (ret)
                jbd_debug(1, "ext4_punch_hole returned %d", ret);
        ext4_ext_replay_shrink_inode(inode,
@@ -1925,11 +1930,11 @@ static int ext4_fc_replay_scan(journal_t *journal,
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_fc_replay_state *state;
        int ret = JBD2_FC_REPLAY_CONTINUE;
-       struct ext4_fc_add_range *ext;
-       struct ext4_fc_tl *tl;
-       struct ext4_fc_tail *tail;
-       __u8 *start, *end;
-       struct ext4_fc_head *head;
+       struct ext4_fc_add_range ext;
+       struct ext4_fc_tl tl;
+       struct ext4_fc_tail tail;
+       __u8 *start, *end, *cur, *val;
+       struct ext4_fc_head head;
        struct ext4_extent *ex;
 
        state = &sbi->s_fc_replay_state;
@@ -1956,15 +1961,17 @@ static int ext4_fc_replay_scan(journal_t *journal,
        }
 
        state->fc_replay_expected_off++;
-       fc_for_each_tl(start, end, tl) {
+       for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) {
+               memcpy(&tl, cur, sizeof(tl));
+               val = cur + sizeof(tl);
                jbd_debug(3, "Scan phase, tag:%s, blk %lld\n",
-                         tag2str(le16_to_cpu(tl->fc_tag)), bh->b_blocknr);
-               switch (le16_to_cpu(tl->fc_tag)) {
+                         tag2str(le16_to_cpu(tl.fc_tag)), bh->b_blocknr);
+               switch (le16_to_cpu(tl.fc_tag)) {
                case EXT4_FC_TAG_ADD_RANGE:
-                       ext = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl);
-                       ex = (struct ext4_extent *)&ext->fc_ex;
+                       memcpy(&ext, val, sizeof(ext));
+                       ex = (struct ext4_extent *)&ext.fc_ex;
                        ret = ext4_fc_record_regions(sb,
-                               le32_to_cpu(ext->fc_ino),
+                               le32_to_cpu(ext.fc_ino),
                                le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex),
                                ext4_ext_get_actual_len(ex));
                        if (ret < 0)
@@ -1978,18 +1985,18 @@ static int ext4_fc_replay_scan(journal_t *journal,
                case EXT4_FC_TAG_INODE:
                case EXT4_FC_TAG_PAD:
                        state->fc_cur_tag++;
-                       state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
-                                       sizeof(*tl) + ext4_fc_tag_len(tl));
+                       state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
+                                       sizeof(tl) + le16_to_cpu(tl.fc_len));
                        break;
                case EXT4_FC_TAG_TAIL:
                        state->fc_cur_tag++;
-                       tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl);
-                       state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
-                                               sizeof(*tl) +
+                       memcpy(&tail, val, sizeof(tail));
+                       state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
+                                               sizeof(tl) +
                                                offsetof(struct ext4_fc_tail,
                                                fc_crc));
-                       if (le32_to_cpu(tail->fc_tid) == expected_tid &&
-                               le32_to_cpu(tail->fc_crc) == state->fc_crc) {
+                       if (le32_to_cpu(tail.fc_tid) == expected_tid &&
+                               le32_to_cpu(tail.fc_crc) == state->fc_crc) {
                                state->fc_replay_num_tags = state->fc_cur_tag;
                                state->fc_regions_valid =
                                        state->fc_regions_used;
@@ -2000,19 +2007,19 @@ static int ext4_fc_replay_scan(journal_t *journal,
                        state->fc_crc = 0;
                        break;
                case EXT4_FC_TAG_HEAD:
-                       head = (struct ext4_fc_head *)ext4_fc_tag_val(tl);
-                       if (le32_to_cpu(head->fc_features) &
+                       memcpy(&head, val, sizeof(head));
+                       if (le32_to_cpu(head.fc_features) &
                                ~EXT4_FC_SUPPORTED_FEATURES) {
                                ret = -EOPNOTSUPP;
                                break;
                        }
-                       if (le32_to_cpu(head->fc_tid) != expected_tid) {
+                       if (le32_to_cpu(head.fc_tid) != expected_tid) {
                                ret = JBD2_FC_REPLAY_STOP;
                                break;
                        }
                        state->fc_cur_tag++;
-                       state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl,
-                                       sizeof(*tl) + ext4_fc_tag_len(tl));
+                       state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
+                                           sizeof(tl) + le16_to_cpu(tl.fc_len));
                        break;
                default:
                        ret = state->fc_replay_num_tags ?
@@ -2036,11 +2043,11 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
 {
        struct super_block *sb = journal->j_private;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
-       struct ext4_fc_tl *tl;
-       __u8 *start, *end;
+       struct ext4_fc_tl tl;
+       __u8 *start, *end, *cur, *val;
        int ret = JBD2_FC_REPLAY_CONTINUE;
        struct ext4_fc_replay_state *state = &sbi->s_fc_replay_state;
-       struct ext4_fc_tail *tail;
+       struct ext4_fc_tail tail;
 
        if (pass == PASS_SCAN) {
                state->fc_current_pass = PASS_SCAN;
@@ -2067,49 +2074,52 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
        start = (u8 *)bh->b_data;
        end = (__u8 *)bh->b_data + journal->j_blocksize - 1;
 
-       fc_for_each_tl(start, end, tl) {
+       for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) {
+               memcpy(&tl, cur, sizeof(tl));
+               val = cur + sizeof(tl);
+
                if (state->fc_replay_num_tags == 0) {
                        ret = JBD2_FC_REPLAY_STOP;
                        ext4_fc_set_bitmaps_and_counters(sb);
                        break;
                }
                jbd_debug(3, "Replay phase, tag:%s\n",
-                               tag2str(le16_to_cpu(tl->fc_tag)));
+                               tag2str(le16_to_cpu(tl.fc_tag)));
                state->fc_replay_num_tags--;
-               switch (le16_to_cpu(tl->fc_tag)) {
+               switch (le16_to_cpu(tl.fc_tag)) {
                case EXT4_FC_TAG_LINK:
-                       ret = ext4_fc_replay_link(sb, tl);
+                       ret = ext4_fc_replay_link(sb, &tl, val);
                        break;
                case EXT4_FC_TAG_UNLINK:
-                       ret = ext4_fc_replay_unlink(sb, tl);
+                       ret = ext4_fc_replay_unlink(sb, &tl, val);
                        break;
                case EXT4_FC_TAG_ADD_RANGE:
-                       ret = ext4_fc_replay_add_range(sb, tl);
+                       ret = ext4_fc_replay_add_range(sb, &tl, val);
                        break;
                case EXT4_FC_TAG_CREAT:
-                       ret = ext4_fc_replay_create(sb, tl);
+                       ret = ext4_fc_replay_create(sb, &tl, val);
                        break;
                case EXT4_FC_TAG_DEL_RANGE:
-                       ret = ext4_fc_replay_del_range(sb, tl);
+                       ret = ext4_fc_replay_del_range(sb, &tl, val);
                        break;
                case EXT4_FC_TAG_INODE:
-                       ret = ext4_fc_replay_inode(sb, tl);
+                       ret = ext4_fc_replay_inode(sb, &tl, val);
                        break;
                case EXT4_FC_TAG_PAD:
                        trace_ext4_fc_replay(sb, EXT4_FC_TAG_PAD, 0,
-                               ext4_fc_tag_len(tl), 0);
+                                            le16_to_cpu(tl.fc_len), 0);
                        break;
                case EXT4_FC_TAG_TAIL:
                        trace_ext4_fc_replay(sb, EXT4_FC_TAG_TAIL, 0,
-                               ext4_fc_tag_len(tl), 0);
-                       tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl);
-                       WARN_ON(le32_to_cpu(tail->fc_tid) != expected_tid);
+                                            le16_to_cpu(tl.fc_len), 0);
+                       memcpy(&tail, val, sizeof(tail));
+                       WARN_ON(le32_to_cpu(tail.fc_tid) != expected_tid);
                        break;
                case EXT4_FC_TAG_HEAD:
                        break;
                default:
-                       trace_ext4_fc_replay(sb, le16_to_cpu(tl->fc_tag), 0,
-                               ext4_fc_tag_len(tl), 0);
+                       trace_ext4_fc_replay(sb, le16_to_cpu(tl.fc_tag), 0,
+                                            le16_to_cpu(tl.fc_len), 0);
                        ret = -ECANCELED;
                        break;
                }
index b77f70f..937c381 100644 (file)
@@ -153,13 +153,6 @@ struct ext4_fc_replay_state {
 #define region_last(__region) (((__region)->lblk) + ((__region)->len) - 1)
 #endif
 
-#define fc_for_each_tl(__start, __end, __tl)                           \
-       for (tl = (struct ext4_fc_tl *)(__start);                       \
-            (__u8 *)tl < (__u8 *)(__end);                              \
-               tl = (struct ext4_fc_tl *)((__u8 *)tl +                 \
-                                       sizeof(struct ext4_fc_tl) +     \
-                                       + le16_to_cpu(tl->fc_len)))
-
 static inline const char *tag2str(__u16 tag)
 {
        switch (tag) {
@@ -186,16 +179,4 @@ static inline const char *tag2str(__u16 tag)
        }
 }
 
-/* Get length of a particular tlv */
-static inline int ext4_fc_tag_len(struct ext4_fc_tl *tl)
-{
-       return le16_to_cpu(tl->fc_len);
-}
-
-/* Get a pointer to "value" of a tlv */
-static inline __u8 *ext4_fc_tag_val(struct ext4_fc_tl *tl)
-{
-       return (__u8 *)tl + sizeof(*tl);
-}
-
 #endif /* __FAST_COMMIT_H__ */
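
The open-coded loops that replace fc_for_each_tl() above follow one general
pattern: rather than casting possibly unaligned journal bytes to struct
pointers, each fixed-size header and value is memcpy()'d onto the stack
first. A standalone sketch of that walk, assuming an on-disk header of two
little-endian 16-bit fields as in struct ext4_fc_tl:

    #include <stdint.h>
    #include <string.h>
    #include <endian.h>

    struct tl_hdr {            /* mirrors struct ext4_fc_tl */
            uint16_t tag;      /* little-endian on disk */
            uint16_t len;
    };

    static void walk_tlvs(const uint8_t *start, const uint8_t *end)
    {
            struct tl_hdr tl;
            const uint8_t *cur = start, *val;

            while (cur + sizeof(tl) <= end) {
                    memcpy(&tl, cur, sizeof(tl)); /* safe if cur is unaligned */
                    val = cur + sizeof(tl);
                    if (val + le16toh(tl.len) > end)
                            break;                /* truncated record */
                    /* dispatch on le16toh(tl.tag), reading val via memcpy */
                    cur = val + le16toh(tl.len);
            }
    }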
index 81a17a3..9bab7fd 100644 (file)
@@ -322,14 +322,16 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
        if (is_directory) {
                count = ext4_used_dirs_count(sb, gdp) - 1;
                ext4_used_dirs_set(sb, gdp, count);
-               percpu_counter_dec(&sbi->s_dirs_counter);
+               if (percpu_counter_initialized(&sbi->s_dirs_counter))
+                       percpu_counter_dec(&sbi->s_dirs_counter);
        }
        ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
                                   EXT4_INODES_PER_GROUP(sb) / 8);
        ext4_group_desc_csum_set(sb, block_group, gdp);
        ext4_unlock_group(sb, block_group);
 
-       percpu_counter_inc(&sbi->s_freeinodes_counter);
+       if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
+               percpu_counter_inc(&sbi->s_freeinodes_counter);
        if (sbi->s_log_groups_per_flex) {
                struct flex_groups *fg;
 
index 3239e66..c2c22c2 100644 (file)
@@ -3217,7 +3217,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
                 */
                if (sbi->s_es->s_log_groups_per_flex >= 32) {
                        ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
-                       goto err_freesgi;
+                       goto err_freebuddy;
                }
                sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
                        BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
index afb9d05..a4af26d 100644 (file)
@@ -1376,7 +1376,8 @@ int ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname,
        struct dx_hash_info *hinfo = &name->hinfo;
        int len;
 
-       if (!IS_CASEFOLDED(dir) || !dir->i_sb->s_encoding) {
+       if (!IS_CASEFOLDED(dir) || !dir->i_sb->s_encoding ||
+           (IS_ENCRYPTED(dir) && !fscrypt_has_encryption_key(dir))) {
                cf_name->name = NULL;
                return 0;
        }
@@ -1427,7 +1428,8 @@ static bool ext4_match(struct inode *parent,
 #endif
 
 #ifdef CONFIG_UNICODE
-       if (parent->i_sb->s_encoding && IS_CASEFOLDED(parent)) {
+       if (parent->i_sb->s_encoding && IS_CASEFOLDED(parent) &&
+           (!IS_ENCRYPTED(parent) || fscrypt_has_encryption_key(parent))) {
                if (fname->cf_name.name) {
                        struct qstr cf = {.name = fname->cf_name.name,
                                          .len = fname->cf_name.len};
index 7dc94f3..d29f6aa 100644 (file)
@@ -4462,14 +4462,20 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        }
 
        if (sb->s_blocksize != blocksize) {
+               /*
+                * bh must be released before kill_bdev(), otherwise
+                * neither it nor its page will be freed. kill_bdev()
+                * is called by sb_set_blocksize().
+                */
+               brelse(bh);
                /* Validate the filesystem blocksize */
                if (!sb_set_blocksize(sb, blocksize)) {
                        ext4_msg(sb, KERN_ERR, "bad block size %d",
                                        blocksize);
+                       bh = NULL;
                        goto failed_mount;
                }
 
-               brelse(bh);
                logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
                offset = do_div(logical_sb_block, blocksize);
                bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
@@ -5202,8 +5208,9 @@ failed_mount:
                kfree(get_qf_name(sb, sbi, i));
 #endif
        fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
-       ext4_blkdev_remove(sbi);
+       /* ext4_blkdev_remove() calls kill_bdev(), so release bh before it. */
        brelse(bh);
+       ext4_blkdev_remove(sbi);
 out_fail:
        sb->s_fs_info = NULL;
        kfree(sbi->s_blockgroup_lock);
index 6f825de..55fcab6 100644 (file)
@@ -315,7 +315,9 @@ EXT4_ATTR_FEATURE(verity);
 #endif
 EXT4_ATTR_FEATURE(metadata_csum_seed);
 EXT4_ATTR_FEATURE(fast_commit);
+#if defined(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
 EXT4_ATTR_FEATURE(encrypted_casefold);
+#endif
 
 static struct attribute *ext4_feat_attrs[] = {
        ATTR_LIST(lazy_itable_init),
@@ -333,7 +335,9 @@ static struct attribute *ext4_feat_attrs[] = {
 #endif
        ATTR_LIST(metadata_csum_seed),
        ATTR_LIST(fast_commit),
+#if defined(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
        ATTR_LIST(encrypted_casefold),
+#endif
        NULL,
 };
 ATTRIBUTE_GROUPS(ext4_feat);
index a0b542d..493a83e 100644 (file)
@@ -911,8 +911,11 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                current->backing_dev_info = inode_to_bdi(inode);
                buffered = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
                current->backing_dev_info = NULL;
-               if (unlikely(buffered <= 0))
+               if (unlikely(buffered <= 0)) {
+                       if (!ret)
+                               ret = buffered;
                        goto out_unlock;
+               }
 
                /*
                 * We need to ensure that the page cache pages are written to
index ea7fc5c..d9cb261 100644 (file)
@@ -582,6 +582,16 @@ out_locked:
        spin_unlock(&gl->gl_lockref.lock);
 }
 
+static bool is_system_glock(struct gfs2_glock *gl)
+{
+       struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+       struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
+
+       if (gl == m_ip->i_gl)
+               return true;
+       return false;
+}
+
 /**
  * do_xmote - Calls the DLM to change the state of a lock
  * @gl: The lock state
@@ -671,17 +681,25 @@ skip_inval:
         * to see sd_log_error and withdraw, and in the meantime, requeue the
         * work for later.
         *
+        * We make a special exception for some system glocks, such as the
+        * system statfs inode glock, which needs to be granted before the
+        * gfs2_quotad daemon can exit, and that exit needs to finish before
+        * we can unmount the withdrawn file system.
+        *
         * However, if we're just unlocking the lock (say, for unmount, when
         * gfs2_gl_hash_clear calls clear_glock) and recovery is complete
         * then it's okay to tell dlm to unlock it.
         */
        if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp)))
                gfs2_withdraw_delayed(sdp);
-       if (glock_blocked_by_withdraw(gl)) {
-               if (target != LM_ST_UNLOCKED ||
-                   test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags)) {
+       if (glock_blocked_by_withdraw(gl) &&
+           (target != LM_ST_UNLOCKED ||
+            test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) {
+               if (!is_system_glock(gl)) {
                        gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
                        goto out;
+               } else {
+                       clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
                }
        }
 
@@ -1466,9 +1484,11 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
            glock_blocked_by_withdraw(gl) &&
            gh->gh_gl != sdp->sd_jinode_gl) {
                sdp->sd_glock_dqs_held++;
+               spin_unlock(&gl->gl_lockref.lock);
                might_sleep();
                wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
                            TASK_UNINTERRUPTIBLE);
+               spin_lock(&gl->gl_lockref.lock);
        }
        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1775,6 +1795,7 @@ __acquires(&lru_lock)
        while(!list_empty(list)) {
                gl = list_first_entry(list, struct gfs2_glock, gl_lru);
                list_del_init(&gl->gl_lru);
+               clear_bit(GLF_LRU, &gl->gl_flags);
                if (!spin_trylock(&gl->gl_lockref.lock)) {
 add_back_to_lru:
                        list_add(&gl->gl_lru, &lru_list);
@@ -1820,7 +1841,6 @@ static long gfs2_scan_glock_lru(int nr)
                if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
                        list_move(&gl->gl_lru, &dispose);
                        atomic_dec(&lru_count);
-                       clear_bit(GLF_LRU, &gl->gl_flags);
                        freed++;
                        continue;
                }
index 454095e..54d3fbe 100644 (file)
@@ -396,7 +396,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
        struct timespec64 atime;
        u16 height, depth;
        umode_t mode = be32_to_cpu(str->di_mode);
-       bool is_new = ip->i_inode.i_flags & I_NEW;
+       bool is_new = ip->i_inode.i_state & I_NEW;
 
        if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
                goto corrupt;
index 97d54e5..42c15cf 100644 (file)
@@ -926,10 +926,10 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
 }
 
 /**
- * ail_drain - drain the ail lists after a withdraw
+ * gfs2_ail_drain - drain the ail lists after a withdraw
  * @sdp: Pointer to GFS2 superblock
  */
-static void ail_drain(struct gfs2_sbd *sdp)
+void gfs2_ail_drain(struct gfs2_sbd *sdp)
 {
        struct gfs2_trans *tr;
 
@@ -956,6 +956,7 @@ static void ail_drain(struct gfs2_sbd *sdp)
                list_del(&tr->tr_list);
                gfs2_trans_free(sdp, tr);
        }
+       gfs2_drain_revokes(sdp);
        spin_unlock(&sdp->sd_ail_lock);
 }
 
@@ -1162,7 +1163,6 @@ out_withdraw:
        if (tr && list_empty(&tr->tr_list))
                list_add(&tr->tr_list, &sdp->sd_ail1_list);
        spin_unlock(&sdp->sd_ail_lock);
-       ail_drain(sdp); /* frees all transactions */
        tr = NULL;
        goto out_end;
 }
index eea5801..fc905c2 100644 (file)
@@ -93,5 +93,6 @@ extern int gfs2_logd(void *data);
 extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
 extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
 extern void gfs2_flush_revokes(struct gfs2_sbd *sdp);
+extern void gfs2_ail_drain(struct gfs2_sbd *sdp);
 
 #endif /* __LOG_DOT_H__ */
index 221e711..8ee05d2 100644 (file)
@@ -885,7 +885,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
        gfs2_log_write_page(sdp, page);
 }
 
-static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+void gfs2_drain_revokes(struct gfs2_sbd *sdp)
 {
        struct list_head *head = &sdp->sd_log_revokes;
        struct gfs2_bufdata *bd;
@@ -900,6 +900,11 @@ static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
        }
 }
 
+static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+{
+       gfs2_drain_revokes(sdp);
+}
+
 static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
                                  struct gfs2_log_header_host *head, int pass)
 {
index 31b6dd0..f707601 100644 (file)
@@ -20,6 +20,7 @@ extern void gfs2_log_submit_bio(struct bio **biop, int opf);
 extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
 extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
                           struct gfs2_log_header_host *head, bool keep_cache);
+extern void gfs2_drain_revokes(struct gfs2_sbd *sdp);
 static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
 {
        return sdp->sd_ldptrs;
index 3e08027..f4325b4 100644 (file)
@@ -131,6 +131,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
        if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) || !sdp->sd_jdesc)
                return;
 
+       gfs2_ail_drain(sdp); /* frees all transactions */
        inode = sdp->sd_jdesc->jd_inode;
        ip = GFS2_I(inode);
        i_gl = ip->i_gl;
index 55efd3d..30dee68 100644 (file)
@@ -735,6 +735,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
                __SetPageUptodate(page);
                error = huge_add_to_page_cache(page, mapping, index);
                if (unlikely(error)) {
+                       restore_reserve_on_error(h, &pseudo_vma, addr, page);
                        put_page(page);
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        goto out;
index 5361a9b..b3e8624 100644 (file)
@@ -979,13 +979,16 @@ static bool io_task_work_match(struct callback_head *cb, void *data)
        return cwd->wqe->wq == data;
 }
 
+void io_wq_exit_start(struct io_wq *wq)
+{
+       set_bit(IO_WQ_BIT_EXIT, &wq->state);
+}
+
 static void io_wq_exit_workers(struct io_wq *wq)
 {
        struct callback_head *cb;
        int node;
 
-       set_bit(IO_WQ_BIT_EXIT, &wq->state);
-
        if (!wq->task)
                return;
 
@@ -1003,13 +1006,16 @@ static void io_wq_exit_workers(struct io_wq *wq)
                struct io_wqe *wqe = wq->wqes[node];
 
                io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
-               spin_lock_irq(&wq->hash->wait.lock);
-               list_del_init(&wq->wqes[node]->wait.entry);
-               spin_unlock_irq(&wq->hash->wait.lock);
        }
        rcu_read_unlock();
        io_worker_ref_put(wq);
        wait_for_completion(&wq->worker_done);
+
+       for_each_node(node) {
+               spin_lock_irq(&wq->hash->wait.lock);
+               list_del_init(&wq->wqes[node]->wait.entry);
+               spin_unlock_irq(&wq->hash->wait.lock);
+       }
        put_task_struct(wq->task);
        wq->task = NULL;
 }
@@ -1020,8 +1026,6 @@ static void io_wq_destroy(struct io_wq *wq)
 
        cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
 
-       io_wq_exit_workers(wq);
-
        for_each_node(node) {
                struct io_wqe *wqe = wq->wqes[node];
                struct io_cb_cancel_data match = {
@@ -1036,16 +1040,13 @@ static void io_wq_destroy(struct io_wq *wq)
        kfree(wq);
 }
 
-void io_wq_put(struct io_wq *wq)
-{
-       if (refcount_dec_and_test(&wq->refs))
-               io_wq_destroy(wq);
-}
-
 void io_wq_put_and_exit(struct io_wq *wq)
 {
+       WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));
+
        io_wq_exit_workers(wq);
-       io_wq_put(wq);
+       if (refcount_dec_and_test(&wq->refs))
+               io_wq_destroy(wq);
 }
 
 static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
index 0e6d310..af2df06 100644 (file)
@@ -122,7 +122,7 @@ struct io_wq_data {
 };
 
 struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
-void io_wq_put(struct io_wq *wq);
+void io_wq_exit_start(struct io_wq *wq);
 void io_wq_put_and_exit(struct io_wq *wq);
 
 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
index 5f82954..fa8794c 100644 (file)
@@ -783,6 +783,11 @@ struct io_task_work {
        task_work_func_t        func;
 };
 
+enum {
+       IORING_RSRC_FILE                = 0,
+       IORING_RSRC_BUFFER              = 1,
+};
+
 /*
  * NOTE! Each of the iocb union members has the file pointer
  * as the first entry in their struct definition. So you can
@@ -8228,6 +8233,7 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
 {
        int i, ret;
 
+       imu->acct_pages = 0;
        for (i = 0; i < nr_pages; i++) {
                if (!PageCompound(pages[i])) {
                        imu->acct_pages++;
@@ -9039,11 +9045,16 @@ static void io_uring_clean_tctx(struct io_uring_task *tctx)
        struct io_tctx_node *node;
        unsigned long index;
 
-       tctx->io_wq = NULL;
        xa_for_each(&tctx->xa, index, node)
                io_uring_del_task_file(index);
-       if (wq)
+       if (wq) {
+               /*
+                * Must be after io_uring_del_task_file() (removes nodes under
+                * uring_lock) to avoid race with io_uring_try_cancel_iowq().
+                */
+               tctx->io_wq = NULL;
                io_wq_put_and_exit(wq);
+       }
 }
 
 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
@@ -9078,6 +9089,9 @@ static void io_uring_cancel_sqpoll(struct io_sq_data *sqd)
 
        if (!current->io_uring)
                return;
+       if (tctx->io_wq)
+               io_wq_exit_start(tctx->io_wq);
+
        WARN_ON_ONCE(!sqd || sqd->thread != current);
 
        atomic_inc(&tctx->in_idle);
@@ -9112,6 +9126,9 @@ void __io_uring_cancel(struct files_struct *files)
        DEFINE_WAIT(wait);
        s64 inflight;
 
+       if (tctx->io_wq)
+               io_wq_exit_start(tctx->io_wq);
+
        /* make sure overflow events are dropped */
        atomic_inc(&tctx->in_idle);
        do {
@@ -9659,7 +9676,8 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
                        IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
                        IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
                        IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
-                       IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS;
+                       IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
+                       IORING_FEAT_RSRC_TAGS;
 
        if (copy_to_user(params, p, sizeof(*p))) {
                ret = -EFAULT;
@@ -9899,7 +9917,7 @@ static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
 }
 
 static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
-                                  unsigned size)
+                                  unsigned size, unsigned type)
 {
        struct io_uring_rsrc_update2 up;
 
@@ -9907,13 +9925,13 @@ static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
                return -EINVAL;
        if (copy_from_user(&up, arg, sizeof(up)))
                return -EFAULT;
-       if (!up.nr)
+       if (!up.nr || up.resv)
                return -EINVAL;
-       return __io_register_rsrc_update(ctx, up.type, &up, up.nr);
+       return __io_register_rsrc_update(ctx, type, &up, up.nr);
 }
 
 static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
-                           unsigned int size)
+                           unsigned int size, unsigned int type)
 {
        struct io_uring_rsrc_register rr;
 
@@ -9924,10 +9942,10 @@ static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
        memset(&rr, 0, sizeof(rr));
        if (copy_from_user(&rr, arg, size))
                return -EFAULT;
-       if (!rr.nr)
+       if (!rr.nr || rr.resv || rr.resv2)
                return -EINVAL;
 
-       switch (rr.type) {
+       switch (type) {
        case IORING_RSRC_FILE:
                return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
                                             rr.nr, u64_to_user_ptr(rr.tags));
@@ -9949,8 +9967,10 @@ static bool io_register_op_must_quiesce(int op)
        case IORING_REGISTER_PROBE:
        case IORING_REGISTER_PERSONALITY:
        case IORING_UNREGISTER_PERSONALITY:
-       case IORING_REGISTER_RSRC:
-       case IORING_REGISTER_RSRC_UPDATE:
+       case IORING_REGISTER_FILES2:
+       case IORING_REGISTER_FILES_UPDATE2:
+       case IORING_REGISTER_BUFFERS2:
+       case IORING_REGISTER_BUFFERS_UPDATE:
                return false;
        default:
                return true;
@@ -10076,11 +10096,19 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
        case IORING_REGISTER_RESTRICTIONS:
                ret = io_register_restrictions(ctx, arg, nr_args);
                break;
-       case IORING_REGISTER_RSRC:
-               ret = io_register_rsrc(ctx, arg, nr_args);
+       case IORING_REGISTER_FILES2:
+               ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
+               break;
+       case IORING_REGISTER_FILES_UPDATE2:
+               ret = io_register_rsrc_update(ctx, arg, nr_args,
+                                             IORING_RSRC_FILE);
+               break;
+       case IORING_REGISTER_BUFFERS2:
+               ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
                break;
-       case IORING_REGISTER_RSRC_UPDATE:
-               ret = io_register_rsrc_update(ctx, arg, nr_args);
+       case IORING_REGISTER_BUFFERS_UPDATE:
+               ret = io_register_rsrc_update(ctx, arg, nr_args,
+                                             IORING_RSRC_BUFFER);
                break;
        default:
                ret = -EINVAL;
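
For context, a hedged userspace sketch of the reworked registration ABI the
hunks above introduce: the operation code itself, rather than a type field
inside the struct, now selects files vs. buffers, and the reserved fields
are rejected when non-zero. The constants and the io_uring_rsrc_register
layout are assumed from the 5.13-era <linux/io_uring.h>:

    #include <linux/io_uring.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdint.h>

    /* Register a fixed-file table via the new typed opcode. */
    static int register_files2(int ring_fd, const int *fds,
                               const __u64 *tags, unsigned int nr)
    {
            struct io_uring_rsrc_register rr;

            memset(&rr, 0, sizeof(rr)); /* resv/resv2 must be zero now */
            rr.nr = nr;
            rr.data = (__u64)(uintptr_t)fds;
            rr.tags = (__u64)(uintptr_t)tags;
            return syscall(__NR_io_uring_register, ring_fd,
                           IORING_REGISTER_FILES2, &rr, sizeof(rr));
    }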
index cfeaadf..330f657 100644 (file)
@@ -406,7 +406,7 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
 
        if (cl_init->hostname == NULL) {
                WARN_ON(1);
-               return NULL;
+               return ERR_PTR(-EINVAL);
        }
 
        /* see if the client already exists */
index d158a50..d210385 100644 (file)
@@ -718,7 +718,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
                if (unlikely(!p))
                        goto out_err;
                fl->fh_array[i]->size = be32_to_cpup(p++);
-               if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
+               if (fl->fh_array[i]->size > NFS_MAXFHSIZE) {
                        printk(KERN_ERR "NFS: Too big fh %d received %d\n",
                               i, fl->fh_array[i]->size);
                        goto out_err;
index 93e60e9..bc0c698 100644 (file)
@@ -362,7 +362,7 @@ static const struct kernel_param_ops param_ops_nfs_timeout = {
        .set = param_set_nfs_timeout,
        .get = param_get_nfs_timeout,
 };
-#define param_check_nfs_timeout(name, p) __param_check(name, p, int);
+#define param_check_nfs_timeout(name, p) __param_check(name, p, int)
 
 module_param(nfs_mountpoint_expiry_timeout, nfs_timeout, 0644);
 MODULE_PARM_DESC(nfs_mountpoint_expiry_timeout,
index 065cb04..543d916 100644 (file)
@@ -205,6 +205,7 @@ struct nfs4_exception {
        struct inode *inode;
        nfs4_stateid *stateid;
        long timeout;
+       unsigned char task_is_privileged : 1;
        unsigned char delay : 1,
                      recovering : 1,
                      retry : 1;
index 889a9f4..4271938 100644 (file)
@@ -435,8 +435,8 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
                 */
                nfs_mark_client_ready(clp, -EPERM);
        }
-       nfs_put_client(clp);
        clear_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags);
+       nfs_put_client(clp);
        return old;
 
 error:
index 57b3821..a1e5c6b 100644 (file)
@@ -211,7 +211,7 @@ static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
        case SEEK_HOLE:
        case SEEK_DATA:
                ret = nfs42_proc_llseek(filep, offset, whence);
-               if (ret != -ENOTSUPP)
+               if (ret != -EOPNOTSUPP)
                        return ret;
                fallthrough;
        default:
index 87d04f2..e653654 100644 (file)
@@ -589,6 +589,8 @@ int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_
                goto out_retry;
        }
        if (exception->recovering) {
+               if (exception->task_is_privileged)
+                       return -EDEADLOCK;
                ret = nfs4_wait_clnt_recover(clp);
                if (test_bit(NFS_MIG_FAILED, &server->mig_status))
                        return -EIO;
@@ -614,6 +616,8 @@ nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
                goto out_retry;
        }
        if (exception->recovering) {
+               if (exception->task_is_privileged)
+                       return -EDEADLOCK;
                rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
                if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
                        rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
@@ -1706,7 +1710,7 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state,
                rcu_read_unlock();
                trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
 
-               if (!signal_pending(current)) {
+               if (!fatal_signal_pending(current)) {
                        if (schedule_timeout(5*HZ) == 0)
                                status = -EAGAIN;
                        else
@@ -3487,7 +3491,7 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
                write_sequnlock(&state->seqlock);
                trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
 
-               if (signal_pending(current))
+               if (fatal_signal_pending(current))
                        status = -EINTR;
                else
                        if (schedule_timeout(5*HZ) != 0)
@@ -3878,6 +3882,10 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
                        server->caps |= NFS_CAP_HARDLINKS;
                if (res.has_symlinks != 0)
                        server->caps |= NFS_CAP_SYMLINKS;
+#ifdef CONFIG_NFS_V4_SECURITY_LABEL
+               if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
+                       server->caps |= NFS_CAP_SECURITY_LABEL;
+#endif
                if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
                        server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
                if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
@@ -3898,10 +3906,6 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
                        server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME;
                if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY))
                        server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME;
-#ifdef CONFIG_NFS_V4_SECURITY_LABEL
-               if (!(res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL))
-                       server->fattr_valid &= ~NFS_ATTR_FATTR_V4_SECURITY_LABEL;
-#endif
                memcpy(server->attr_bitmask_nl, res.attr_bitmask,
                                sizeof(server->attr_bitmask));
                server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
@@ -5968,6 +5972,14 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen
        do {
                err = __nfs4_proc_set_acl(inode, buf, buflen);
                trace_nfs4_set_acl(inode, err);
+               if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) {
+                       /*
+                        * no need to retry since the kernel
+                        * isn't involved in encoding the ACEs.
+                        */
+                       err = -EINVAL;
+                       break;
+               }
                err = nfs4_handle_exception(NFS_SERVER(inode), err,
                                &exception);
        } while (exception.retry);
@@ -6409,6 +6421,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
        struct nfs4_exception exception = {
                .inode = data->inode,
                .stateid = &data->stateid,
+               .task_is_privileged = data->args.seq_args.sa_privileged,
        };
 
        if (!nfs4_sequence_done(task, &data->res.seq_res))
@@ -6532,7 +6545,6 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
        data = kzalloc(sizeof(*data), GFP_NOFS);
        if (data == NULL)
                return -ENOMEM;
-       nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
 
        nfs4_state_protect(server->nfs_client,
                        NFS_SP4_MACH_CRED_CLEANUP,
@@ -6563,6 +6575,12 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
                }
        }
 
+       if (!data->inode)
+               nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
+                                  1);
+       else
+               nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
+                                  0);
        task_setup_data.callback_data = data;
        msg.rpc_argp = &data->args;
        msg.rpc_resp = &data->res;
@@ -9640,15 +9658,20 @@ int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
                        &task_setup_data.rpc_client, &msg);
 
        dprintk("--> %s\n", __func__);
+       lrp->inode = nfs_igrab_and_active(lrp->args.inode);
        if (!sync) {
-               lrp->inode = nfs_igrab_and_active(lrp->args.inode);
                if (!lrp->inode) {
                        nfs4_layoutreturn_release(lrp);
                        return -EAGAIN;
                }
                task_setup_data.flags |= RPC_TASK_ASYNC;
        }
-       nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 0);
+       if (!lrp->inode)
+               nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
+                                  1);
+       else
+               nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
+                                  0);
        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
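
The pattern repeated in the delegreturn and layoutreturn hunks above is the
point of this series: mark the sequence privileged whenever no active inode
reference could be taken, and have the exception handler fail a privileged
task with -EDEADLOCK rather than letting it sleep on the recovery waitqueue
it may itself be driving. A standalone sketch of that decision (names and
the errno value are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

#define EDEADLOCK 35    /* demo errno value */

static int handle_recovering(bool task_is_privileged)
{
        if (task_is_privileged)
                return -EDEADLOCK;  /* never wait on our own recovery */
        return 0;                   /* ok to sleep on the recovery waitqueue */
}

int main(void)
{
        bool pinned_inode = false;        /* nfs_igrab_and_active() failed */
        bool privileged = !pinned_inode;  /* last nfs4_init_sequence() arg */

        printf("privileged=%d -> %d\n", privileged,
               handle_recovering(privileged));
        return 0;
}
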
index eb1ef34..ccef43e 100644 (file)
@@ -430,10 +430,6 @@ TRACE_DEFINE_ENUM(O_CLOEXEC);
                { O_NOATIME, "O_NOATIME" }, \
                { O_CLOEXEC, "O_CLOEXEC" })
 
-TRACE_DEFINE_ENUM(FMODE_READ);
-TRACE_DEFINE_ENUM(FMODE_WRITE);
-TRACE_DEFINE_ENUM(FMODE_EXEC);
-
 #define show_fmode_flags(mode) \
        __print_flags(mode, "|", \
                { ((__force unsigned long)FMODE_READ), "READ" }, \
index 6c20b28..cf9cc62 100644 (file)
@@ -1094,15 +1094,16 @@ nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
        struct nfs_page *prev = NULL;
        unsigned int size;
 
-       if (mirror->pg_count != 0) {
-               prev = nfs_list_entry(mirror->pg_list.prev);
-       } else {
+       if (list_empty(&mirror->pg_list)) {
                if (desc->pg_ops->pg_init)
                        desc->pg_ops->pg_init(desc, req);
                if (desc->pg_error < 0)
                        return 0;
                mirror->pg_base = req->wb_pgbase;
-       }
+               mirror->pg_count = 0;
+               mirror->pg_recoalesce = 0;
+       } else
+               prev = nfs_list_entry(mirror->pg_list.prev);
 
        if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) {
                if (NFS_SERVER(desc->pg_inode)->flags & NFS_MOUNT_SOFTERR)
@@ -1127,18 +1128,13 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
 {
        struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 
-
        if (!list_empty(&mirror->pg_list)) {
                int error = desc->pg_ops->pg_doio(desc);
                if (error < 0)
                        desc->pg_error = error;
-               else
+               if (list_empty(&mirror->pg_list))
                        mirror->pg_bytes_written += mirror->pg_count;
        }
-       if (list_empty(&mirror->pg_list)) {
-               mirror->pg_count = 0;
-               mirror->pg_base = 0;
-       }
 }
 
 static void
@@ -1227,10 +1223,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
 
        do {
                list_splice_init(&mirror->pg_list, &head);
-               mirror->pg_bytes_written -= mirror->pg_count;
-               mirror->pg_count = 0;
-               mirror->pg_base = 0;
-               mirror->pg_recoalesce = 0;
 
                while (!list_empty(&head)) {
                        struct nfs_page *req;
index 03e0b34..2c01ee8 100644 (file)
@@ -1317,6 +1317,11 @@ _pnfs_return_layout(struct inode *ino)
 {
        struct pnfs_layout_hdr *lo = NULL;
        struct nfs_inode *nfsi = NFS_I(ino);
+       struct pnfs_layout_range range = {
+               .iomode         = IOMODE_ANY,
+               .offset         = 0,
+               .length         = NFS4_MAX_UINT64,
+       };
        LIST_HEAD(tmp_list);
        const struct cred *cred;
        nfs4_stateid stateid;
@@ -1344,16 +1349,10 @@ _pnfs_return_layout(struct inode *ino)
        }
        valid_layout = pnfs_layout_is_valid(lo);
        pnfs_clear_layoutcommit(ino, &tmp_list);
-       pnfs_mark_matching_lsegs_return(lo, &tmp_list, NULL, 0);
+       pnfs_mark_matching_lsegs_return(lo, &tmp_list, &range, 0);
 
-       if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
-               struct pnfs_layout_range range = {
-                       .iomode         = IOMODE_ANY,
-                       .offset         = 0,
-                       .length         = NFS4_MAX_UINT64,
-               };
+       if (NFS_SERVER(ino)->pnfs_curr_ld->return_range)
                NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
-       }
 
        /* Don't send a LAYOUTRETURN if list was initially empty */
        if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) ||
@@ -2678,7 +2677,7 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_range);
 void
 pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
 {
-       u64 rd_size = req->wb_bytes;
+       u64 rd_size;
 
        pnfs_generic_pg_check_layout(pgio);
        pnfs_generic_pg_check_range(pgio, req);
index 19a212f..fe58525 100644 (file)
@@ -1379,7 +1379,7 @@ static const struct kernel_param_ops param_ops_portnr = {
        .set = param_set_portnr,
        .get = param_get_uint,
 };
-#define param_check_portnr(name, p) __param_check(name, p, unsigned int);
+#define param_check_portnr(name, p) __param_check(name, p, unsigned int)
 
 module_param_named(callback_tcpport, nfs_callback_set_tcpport, portnr, 0644);
 module_param_named(callback_nr_threads, nfs_callback_nr_threads, ushort, 0644);
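
For context on the one-character change above, a hedged illustration of why
the trailing semicolon in a *_check macro is harmful. Inside a block the
doubled semicolon is only an empty statement, but at file scope -- where
__param_check() expansions land -- it becomes an empty declaration that
pedantic builds reject. Illustration only, not kernel code:

#include <stdio.h>

#define DECLARE_BAD(name)  int name = 0;   /* trailing ';' baked in */
#define DECLARE_OK(name)   int name = 0

int main(void)
{
        DECLARE_BAD(a);  /* expands to: int a = 0;;  (stray ';') */
        DECLARE_OK(b);   /* expands to: int b = 0;   (as intended) */

        printf("%d\n", a + b);
        return 0;
}
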
index 71fefb3..64864fb 100644 (file)
@@ -424,11 +424,18 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
         * events generated by the listener process itself, without disclosing
         * the pids of other processes.
         */
-       if (!capable(CAP_SYS_ADMIN) &&
+       if (FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) &&
            task_tgid(current) != event->pid)
                metadata.pid = 0;
 
-       if (path && path->mnt && path->dentry) {
+       /*
+        * For now, fid mode is required for an unprivileged listener and
+        * fid mode does not report fd in events.  Keep this check anyway
+        * for safety in case the fid mode requirement is relaxed in the
+        * future to allow an unprivileged listener to get events with no fd
+        * and no fid.
+        */
+       if (!FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) &&
+           path && path->mnt && path->dentry) {
                fd = create_fd(group, path, &f);
                if (fd < 0)
                        return fd;
@@ -464,7 +471,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
                                        info_type, fanotify_info_name(info),
                                        info->name_len, buf, count);
                if (ret < 0)
-                       return ret;
+                       goto out_close_fd;
 
                buf += ret;
                count -= ret;
@@ -512,7 +519,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
                                        fanotify_event_object_fh(event),
                                        info_type, dot, dot_len, buf, count);
                if (ret < 0)
-                       return ret;
+                       goto out_close_fd;
 
                buf += ret;
                count -= ret;
@@ -1040,6 +1047,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
        int f_flags, fd;
        unsigned int fid_mode = flags & FANOTIFY_FID_BITS;
        unsigned int class = flags & FANOTIFY_CLASS_BITS;
+       unsigned int internal_flags = 0;
 
        pr_debug("%s: flags=%x event_f_flags=%x\n",
                 __func__, flags, event_f_flags);
@@ -1053,6 +1061,13 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
                 */
                if ((flags & FANOTIFY_ADMIN_INIT_FLAGS) || !fid_mode)
                        return -EPERM;
+
+               /*
+                * Setting the internal flag FANOTIFY_UNPRIV on the group
+                * prevents setting mount/filesystem marks on this group and
+                * prevents reporting pid and open fd in events.
+                */
+               internal_flags |= FANOTIFY_UNPRIV;
        }
 
 #ifdef CONFIG_AUDITSYSCALL
@@ -1105,7 +1120,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
                goto out_destroy_group;
        }
 
-       group->fanotify_data.flags = flags;
+       group->fanotify_data.flags = flags | internal_flags;
        group->memcg = get_mem_cgroup_from_mm(current->mm);
 
        group->fanotify_data.merge_hash = fanotify_alloc_merge_hash();
@@ -1305,11 +1320,13 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
        group = f.file->private_data;
 
        /*
-        * An unprivileged user is not allowed to watch a mount point nor
-        * a filesystem.
+        * An unprivileged user is not allowed to set up mount or filesystem
+        * marks.  This also applies to marks set up through a group that
+        * was initialized by an unprivileged user.
         */
        ret = -EPERM;
-       if (!capable(CAP_SYS_ADMIN) &&
+       if ((!capable(CAP_SYS_ADMIN) ||
+            FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV)) &&
            mark_type != FAN_MARK_INODE)
                goto fput_and_out;
 
@@ -1460,6 +1477,7 @@ static int __init fanotify_user_setup(void)
        max_marks = clamp(max_marks, FANOTIFY_OLD_DEFAULT_MAX_MARKS,
                                     FANOTIFY_DEFAULT_MAX_USER_MARKS);
 
+       BUILD_BUG_ON(FANOTIFY_INIT_FLAGS & FANOTIFY_INTERNAL_GROUP_FLAGS);
        BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 10);
        BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 9);
 
index a712b2a..57f0d5d 100644 (file)
@@ -144,7 +144,7 @@ void fanotify_show_fdinfo(struct seq_file *m, struct file *f)
        struct fsnotify_group *group = f->private_data;
 
        seq_printf(m, "fanotify flags:%x event-flags:%x\n",
-                  group->fanotify_data.flags,
+                  group->fanotify_data.flags & FANOTIFY_INIT_FLAGS,
                   group->fanotify_data.f_flags);
 
        show_fdinfo(m, f, fanotify_fdinfo);
index f17c3d3..7756579 100644 (file)
@@ -1856,6 +1856,45 @@ out:
 }
 
 /*
+ * Zero out partial blocks of one cluster.
+ *
+ * start: file offset where zeroing starts; it will be rounded up to the
+ *        next block boundary.
+ * len: trimmed to the end of the current cluster if "start + len"
+ *      extends beyond it.
+ */
+static int ocfs2_zeroout_partial_cluster(struct inode *inode,
+                                       u64 start, u64 len)
+{
+       int ret;
+       u64 start_block, end_block, nr_blocks;
+       u64 p_block, offset;
+       u32 cluster, p_cluster, nr_clusters;
+       struct super_block *sb = inode->i_sb;
+       u64 end = ocfs2_align_bytes_to_clusters(sb, start);
+
+       if (start + len < end)
+               end = start + len;
+
+       start_block = ocfs2_blocks_for_bytes(sb, start);
+       end_block = ocfs2_blocks_for_bytes(sb, end);
+       nr_blocks = end_block - start_block;
+       if (!nr_blocks)
+               return 0;
+
+       cluster = ocfs2_bytes_to_clusters(sb, start);
+       ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
+                               &nr_clusters, NULL);
+       if (ret)
+               return ret;
+       if (!p_cluster)
+               return 0;
+
+       offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
+       p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
+       return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
+}
+
+/*
  * Parts of this function taken from xfs_change_file_space()
  */
 static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
@@ -1865,7 +1904,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 {
        int ret;
        s64 llen;
-       loff_t size;
+       loff_t size, orig_isize;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct buffer_head *di_bh = NULL;
        handle_t *handle;
@@ -1896,6 +1935,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                goto out_inode_unlock;
        }
 
+       orig_isize = i_size_read(inode);
        switch (sr->l_whence) {
        case 0: /*SEEK_SET*/
                break;
@@ -1903,7 +1943,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                sr->l_start += f_pos;
                break;
        case 2: /*SEEK_END*/
-               sr->l_start += i_size_read(inode);
+               sr->l_start += orig_isize;
                break;
        default:
                ret = -EINVAL;
@@ -1957,6 +1997,14 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
        default:
                ret = -EINVAL;
        }
+
+       /* zeroout eof blocks in the cluster. */
+       if (!ret && change_size && orig_isize < size) {
+               ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
+                                       size - orig_isize);
+               if (!ret)
+                       i_size_write(inode, size);
+       }
        up_write(&OCFS2_I(inode)->ip_alloc_sem);
        if (ret) {
                mlog_errno(ret);
@@ -1973,9 +2021,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                goto out_inode_unlock;
        }
 
-       if (change_size && i_size_read(inode) < size)
-               i_size_write(inode, size);
-
        inode->i_ctime = inode->i_mtime = current_time(inode);
        ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
        if (ret < 0)
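
A worked example of the range math in ocfs2_zeroout_partial_cluster() above,
under assumed geometry (4 KiB blocks, 32 KiB clusters; all values
illustrative, and the helpers stand in for the ocfs2_*() conversion macros):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE   4096ULL
#define CLUSTER_SIZE 32768ULL

static uint64_t align_up(uint64_t x, uint64_t a)
{
        return (x + a - 1) / a * a;
}

int main(void)
{
        uint64_t start = 5000, len = 100000;

        /* end of the cluster containing 'start' ... */
        uint64_t end = align_up(start, CLUSTER_SIZE);

        /* ... trimmed to start + len when that is shorter */
        if (start + len < end)
                end = start + len;

        /* whole blocks for [start, end), rounding up like
         * ocfs2_blocks_for_bytes() */
        uint64_t start_block = align_up(start, BLOCK_SIZE) / BLOCK_SIZE;
        uint64_t end_block = align_up(end, BLOCK_SIZE) / BLOCK_SIZE;

        printf("zero blocks [%" PRIu64 ", %" PRIu64 ") -> %" PRIu64 " blocks\n",
               start_block, end_block, end_block - start_block);
        return 0;
}
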
index 58bbf33..9cbd915 100644 (file)
@@ -2674,6 +2674,13 @@ out:
 }
 
 #ifdef CONFIG_SECURITY
+static int proc_pid_attr_open(struct inode *inode, struct file *file)
+{
+       file->private_data = NULL;
+       __mem_open(inode, file, PTRACE_MODE_READ_FSCREDS);
+       return 0;
+}
+
 static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
                                  size_t count, loff_t *ppos)
 {
@@ -2704,7 +2711,7 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
        int rv;
 
        /* A task may only write when it was the opener. */
-       if (file->f_cred != current_real_cred())
+       if (file->private_data != current->mm)
                return -EPERM;
 
        rcu_read_lock();
@@ -2754,9 +2761,11 @@ out:
 }
 
 static const struct file_operations proc_pid_attr_operations = {
+       .open           = proc_pid_attr_open,
        .read           = proc_pid_attr_read,
        .write          = proc_pid_attr_write,
        .llseek         = generic_file_llseek,
+       .release        = mem_release,
 };
 
 #define LSM_DIR_OPS(LSM) \
index e32a183..bbfea80 100644 (file)
@@ -325,10 +325,22 @@ out:
                error2 = xfs_alloc_pagf_init(mp, tp, pag->pag_agno, 0);
                if (error2)
                        return error2;
-               ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
-                      xfs_perag_resv(pag, XFS_AG_RESV_RMAPBT)->ar_reserved <=
-                      pag->pagf_freeblks + pag->pagf_flcount);
+
+               /*
+                * If there isn't enough space in the AG to satisfy the
+                * reservation, let the caller know.  Callers are responsible
+                * for deciding what to do next, since (in theory) we can
+                * stumble along with an insufficient reservation if data
+                * blocks are being freed to replenish the AG's free space.
+                */
+               if (!error &&
+                   xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
+                   xfs_perag_resv(pag, XFS_AG_RESV_RMAPBT)->ar_reserved >
+                   pag->pagf_freeblks + pag->pagf_flcount)
+                       error = -ENOSPC;
        }
+
        return error;
 }
 
index 7e3b9b0..a3e0e6f 100644 (file)
@@ -605,7 +605,6 @@ xfs_bmap_btree_to_extents(
 
        ASSERT(cur);
        ASSERT(whichfork != XFS_COW_FORK);
-       ASSERT(!xfs_need_iread_extents(ifp));
        ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
        ASSERT(be16_to_cpu(rblock->bb_level) == 1);
        ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
@@ -5350,7 +5349,6 @@ __xfs_bunmapi(
        xfs_fsblock_t           sum;
        xfs_filblks_t           len = *rlen;    /* length to unmap in file */
        xfs_fileoff_t           max_len;
-       xfs_agnumber_t          prev_agno = NULLAGNUMBER, agno;
        xfs_fileoff_t           end;
        struct xfs_iext_cursor  icur;
        bool                    done = false;
@@ -5442,16 +5440,6 @@ __xfs_bunmapi(
                del = got;
                wasdel = isnullstartblock(del.br_startblock);
 
-               /*
-                * Make sure we don't touch multiple AGF headers out of order
-                * in a single transaction, as that could cause AB-BA deadlocks.
-                */
-               if (!wasdel && !isrt) {
-                       agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
-                       if (prev_agno != NULLAGNUMBER && prev_agno > agno)
-                               break;
-                       prev_agno = agno;
-               }
                if (got.br_startoff < start) {
                        del.br_startoff = start;
                        del.br_blockcount -= start - got.br_startoff;
index 5c9a744..f3254a4 100644 (file)
@@ -559,8 +559,17 @@ xfs_dinode_calc_crc(
 /*
  * Validate di_extsize hint.
  *
- * The rules are documented at xfs_ioctl_setattr_check_extsize().
- * These functions must be kept in sync with each other.
+ * 1. Extent size hint is only valid for directories and regular files.
+ * 2. FS_XFLAG_EXTSIZE is only valid for regular files.
+ * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories.
+ * 4. Hint cannot be larger than MAXEXTLEN.
+ * 5. Can be changed on directories at any time.
+ * 6. Hint value of 0 turns off hints, clears inode flags.
+ * 7. Extent size must be a multiple of the appropriate block size.
+ *    For realtime files, this is the rt extent size.
+ * 8. For non-realtime files, the extent size hint must be limited
+ *    to half the AG size to avoid alignment extending the extent beyond the
+ *    limits of the AG.
  */
 xfs_failaddr_t
 xfs_inode_validate_extsize(
@@ -580,6 +589,28 @@ xfs_inode_validate_extsize(
        inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT);
        extsize_bytes = XFS_FSB_TO_B(mp, extsize);
 
+       /*
+        * This comment describes a historic gap in this verifier function.
+        *
+        * On older kernels, the extent size hint verifier doesn't check that
+        * the extent size hint is an integer multiple of the realtime extent
+        * size on a directory with both RTINHERIT and EXTSZINHERIT flags set.
+        * The verifier has always enforced the alignment rule for regular
+        * files with the REALTIME flag set.
+        *
+        * If a directory with a misaligned extent size hint is allowed to
+        * propagate that hint into a new regular realtime file, the result
+        * is that the inode cluster buffer verifier will trigger a corruption
+        * shutdown the next time it is run.
+        *
+        * Unfortunately, there could be filesystems with these misconfigured
+        * directories in the wild, so we cannot add a check to this verifier
+        * at this time because that will result in a new source of directory
+        * corruption errors when reading an existing filesystem.  Instead, we
+        * permit the misconfiguration to pass through the verifiers so that
+        * callers of this function can correct and mitigate externally.
+        */
+
        if (rt_flag)
                blocksize_bytes = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
        else
@@ -616,8 +647,15 @@ xfs_inode_validate_extsize(
 /*
  * Validate di_cowextsize hint.
  *
- * The rules are documented at xfs_ioctl_setattr_check_cowextsize().
- * These functions must be kept in sync with each other.
+ * 1. CoW extent size hint can only be set if reflink is enabled on the fs.
+ *    The inode does not have to have any shared blocks, but it must be a v3.
+ * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files;
+ *    for a directory, the hint is propagated to new files.
+ * 3. Can be changed on files & directories at any time.
+ * 4. Hint value of 0 turns off hints, clears inode flags.
+ * 5. Extent size must be a multiple of the appropriate block size.
+ * 6. The extent size hint must be limited to half the AG size to avoid
+ *    alignment extending the extent beyond the limits of the AG.
  */
 xfs_failaddr_t
 xfs_inode_validate_cowextsize(
index 78324e0..8d595a5 100644 (file)
@@ -143,6 +143,23 @@ xfs_trans_log_inode(
        }
 
        /*
+        * Inode verifiers on older kernels don't check that the extent size
+        * hint is an integer multiple of the rt extent size on a directory
+        * with both rtinherit and extszinherit flags set.  If we're logging a
+        * directory that is misconfigured in this way, clear the hint.
+        */
+       if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
+           (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) &&
+           (ip->i_extsize % ip->i_mount->m_sb.sb_rextsize) > 0) {
+               xfs_info_once(ip->i_mount,
+       "Correcting misaligned extent size hint in inode 0x%llx.", ip->i_ino);
+               ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
+                                  XFS_DIFLAG_EXTSZINHERIT);
+               ip->i_extsize = 0;
+               flags |= XFS_ILOG_CORE;
+       }
+
+       /*
         * Record the specific change for fdatasync optimisation. This allows
         * fdatasync to skip log forces for inodes that are only timestamp
         * dirty.
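
A small numeric sketch of the correction being logged above; the rt extent
size and the inherited hint are made-up values, and the clearing mirrors
what the hunk does to i_extsize and the two inherit flags:

#include <stdio.h>

int main(void)
{
        unsigned int rextsize = 4;  /* rt extent size in fs blocks (assumed) */
        unsigned int extsize = 6;   /* inherited hint in fs blocks (assumed) */

        if (extsize % rextsize) {
                printf("misaligned hint %u (rextsize %u): clearing\n",
                       extsize, rextsize);
                extsize = 0;        /* mirrors ip->i_extsize = 0 */
        }
        printf("effective extent size hint: %u\n", extsize);
        return 0;
}
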
index 0369eb2..e4c2da4 100644 (file)
@@ -690,6 +690,7 @@ xfs_inode_inherit_flags(
        const struct xfs_inode  *pip)
 {
        unsigned int            di_flags = 0;
+       xfs_failaddr_t          failaddr;
        umode_t                 mode = VFS_I(ip)->i_mode;
 
        if (S_ISDIR(mode)) {
@@ -729,6 +730,24 @@ xfs_inode_inherit_flags(
                di_flags |= XFS_DIFLAG_FILESTREAM;
 
        ip->i_diflags |= di_flags;
+
+       /*
+        * Inode verifiers on older kernels only check that the extent size
+        * hint is an integer multiple of the rt extent size on realtime files.
+        * They do not check the hint alignment on a directory with both
+        * rtinherit and extszinherit flags set.  If the misaligned hint is
+        * propagated from a directory into a new realtime file, new file
+        * allocations will fail due to math errors in the rt allocator and/or
+        * trip the verifiers.  Validate the hint settings in the new file so
+        * that we don't let broken hints propagate.
+        */
+       failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
+                       VFS_I(ip)->i_mode, ip->i_diflags);
+       if (failaddr) {
+               ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
+                                  XFS_DIFLAG_EXTSZINHERIT);
+               ip->i_extsize = 0;
+       }
 }
 
 /* Propagate di_flags2 from a parent inode to a child inode. */
@@ -737,12 +756,22 @@ xfs_inode_inherit_flags2(
        struct xfs_inode        *ip,
        const struct xfs_inode  *pip)
 {
+       xfs_failaddr_t          failaddr;
+
        if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
                ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
                ip->i_cowextsize = pip->i_cowextsize;
        }
        if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
                ip->i_diflags2 |= XFS_DIFLAG2_DAX;
+
+       /* Don't let invalid cowextsize hints propagate. */
+       failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
+                       VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
+       if (failaddr) {
+               ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
+               ip->i_cowextsize = 0;
+       }
 }
 
 /*
index 3925bfc..1fe4c1f 100644 (file)
@@ -1267,20 +1267,8 @@ out_error:
 }
 
 /*
- * extent size hint validation is somewhat cumbersome. Rules are:
- *
- * 1. extent size hint is only valid for directories and regular files
- * 2. FS_XFLAG_EXTSIZE is only valid for regular files
- * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories.
- * 4. can only be changed on regular files if no extents are allocated
- * 5. can be changed on directories at any time
- * 6. extsize hint of 0 turns off hints, clears inode flags.
- * 7. Extent size must be a multiple of the appropriate block size.
- * 8. for non-realtime files, the extent size hint must be limited
- *    to half the AG size to avoid alignment extending the extent beyond the
- *    limits of the AG.
- *
- * Please keep this function in sync with xfs_scrub_inode_extsize.
+ * Validate a proposed extent size hint.  For regular files, the hint can only
+ * be changed if no extents are allocated.
  */
 static int
 xfs_ioctl_setattr_check_extsize(
@@ -1288,86 +1276,65 @@ xfs_ioctl_setattr_check_extsize(
        struct fileattr         *fa)
 {
        struct xfs_mount        *mp = ip->i_mount;
-       xfs_extlen_t            size;
-       xfs_fsblock_t           extsize_fsb;
+       xfs_failaddr_t          failaddr;
+       uint16_t                new_diflags;
 
        if (!fa->fsx_valid)
                return 0;
 
        if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_df.if_nextents &&
-           ((ip->i_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
+           XFS_FSB_TO_B(mp, ip->i_extsize) != fa->fsx_extsize)
                return -EINVAL;
 
-       if (fa->fsx_extsize == 0)
-               return 0;
-
-       extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
-       if (extsize_fsb > MAXEXTLEN)
+       if (fa->fsx_extsize & mp->m_blockmask)
                return -EINVAL;
 
-       if (XFS_IS_REALTIME_INODE(ip) ||
-           (fa->fsx_xflags & FS_XFLAG_REALTIME)) {
-               size = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
-       } else {
-               size = mp->m_sb.sb_blocksize;
-               if (extsize_fsb > mp->m_sb.sb_agblocks / 2)
+       new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
+
+       /*
+        * Inode verifiers on older kernels don't check that the extent size
+        * hint is an integer multiple of the rt extent size on a directory
+        * with both rtinherit and extszinherit flags set.  Don't let sysadmins
+        * misconfigure directories.
+        */
+       if ((new_diflags & XFS_DIFLAG_RTINHERIT) &&
+           (new_diflags & XFS_DIFLAG_EXTSZINHERIT)) {
+               unsigned int    rtextsize_bytes;
+
+               rtextsize_bytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
+               if (fa->fsx_extsize % rtextsize_bytes)
                        return -EINVAL;
        }
 
-       if (fa->fsx_extsize % size)
-               return -EINVAL;
-
-       return 0;
+       failaddr = xfs_inode_validate_extsize(ip->i_mount,
+                       XFS_B_TO_FSB(mp, fa->fsx_extsize),
+                       VFS_I(ip)->i_mode, new_diflags);
+       return failaddr != NULL ? -EINVAL : 0;
 }
 
-/*
- * CoW extent size hint validation rules are:
- *
- * 1. CoW extent size hint can only be set if reflink is enabled on the fs.
- *    The inode does not have to have any shared blocks, but it must be a v3.
- * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files;
- *    for a directory, the hint is propagated to new files.
- * 3. Can be changed on files & directories at any time.
- * 4. CoW extsize hint of 0 turns off hints, clears inode flags.
- * 5. Extent size must be a multiple of the appropriate block size.
- * 6. The extent size hint must be limited to half the AG size to avoid
- *    alignment extending the extent beyond the limits of the AG.
- *
- * Please keep this function in sync with xfs_scrub_inode_cowextsize.
- */
 static int
 xfs_ioctl_setattr_check_cowextsize(
        struct xfs_inode        *ip,
        struct fileattr         *fa)
 {
        struct xfs_mount        *mp = ip->i_mount;
-       xfs_extlen_t            size;
-       xfs_fsblock_t           cowextsize_fsb;
+       xfs_failaddr_t          failaddr;
+       uint64_t                new_diflags2;
+       uint16_t                new_diflags;
 
        if (!fa->fsx_valid)
                return 0;
 
-       if (!(fa->fsx_xflags & FS_XFLAG_COWEXTSIZE))
-               return 0;
-
-       if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb))
+       if (fa->fsx_cowextsize & mp->m_blockmask)
                return -EINVAL;
 
-       if (fa->fsx_cowextsize == 0)
-               return 0;
+       new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
+       new_diflags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
 
-       cowextsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
-       if (cowextsize_fsb > MAXEXTLEN)
-               return -EINVAL;
-
-       size = mp->m_sb.sb_blocksize;
-       if (cowextsize_fsb > mp->m_sb.sb_agblocks / 2)
-               return -EINVAL;
-
-       if (fa->fsx_cowextsize % size)
-               return -EINVAL;
-
-       return 0;
+       failaddr = xfs_inode_validate_cowextsize(ip->i_mount,
+                       XFS_B_TO_FSB(mp, fa->fsx_cowextsize),
+                       VFS_I(ip)->i_mode, new_diflags, new_diflags2);
+       return failaddr != NULL ? -EINVAL : 0;
 }
 
 static int
index 3c392b1..7ec1a92 100644 (file)
@@ -73,6 +73,8 @@ do {                                                                  \
        xfs_printk_once(xfs_warn, dev, fmt, ##__VA_ARGS__)
 #define xfs_notice_once(dev, fmt, ...)                         \
        xfs_printk_once(xfs_notice, dev, fmt, ##__VA_ARGS__)
+#define xfs_info_once(dev, fmt, ...)                           \
+       xfs_printk_once(xfs_info, dev, fmt, ##__VA_ARGS__)
 
 void assfail(struct xfs_mount *mp, char *expr, char *f, int l);
 void asswarn(struct xfs_mount *mp, char *expr, char *f, int l);
index 40a9c10..1732541 100644 (file)
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 #define PERCPU_DECRYPTED_SECTION                                       \
        . = ALIGN(PAGE_SIZE);                                           \
+       *(.data..decrypted)                                             \
        *(.data..percpu..decrypted)                                     \
        . = ALIGN(PAGE_SIZE);
 #else
index fef3ef6..e6526b1 100644 (file)
  * <20:16>  :: Reserved, Shall be set to zero
  * <15:0>   :: USB-IF assigned VID for this cable vendor
  */
+
+/* PD Rev2.0 definition */
+#define IDH_PTYPE_UNDEF                0
+
 /* SOP Product Type (UFP) */
 #define IDH_PTYPE_NOT_UFP       0
 #define IDH_PTYPE_HUB           1
 #define UFP_VDO_VER1_2         2
 
 /* Device Capability */
-#define DEV_USB2_CAPABLE       BIT(0)
-#define DEV_USB2_BILLBOARD     BIT(1)
-#define DEV_USB3_CAPABLE       BIT(2)
-#define DEV_USB4_CAPABLE       BIT(3)
+#define DEV_USB2_CAPABLE       (1 << 0)
+#define DEV_USB2_BILLBOARD     (1 << 1)
+#define DEV_USB3_CAPABLE       (1 << 2)
+#define DEV_USB4_CAPABLE       (1 << 3)
 
 /* Connector Type */
 #define UFP_RECEPTACLE         2
 
 /* Alternate Modes */
 #define UFP_ALTMODE_NOT_SUPP   0
-#define UFP_ALTMODE_TBT3       BIT(0)
-#define UFP_ALTMODE_RECFG      BIT(1)
-#define UFP_ALTMODE_NO_RECFG   BIT(2)
+#define UFP_ALTMODE_TBT3       (1 << 0)
+#define UFP_ALTMODE_RECFG      (1 << 1)
+#define UFP_ALTMODE_NO_RECFG   (1 << 2)
 
 /* USB Highest Speed */
 #define UFP_USB2_ONLY          0
  * <4:0>   :: Port number
  */
 #define DFP_VDO_VER1_1         1
-#define HOST_USB2_CAPABLE      BIT(0)
-#define HOST_USB3_CAPABLE      BIT(1)
-#define HOST_USB4_CAPABLE      BIT(2)
+#define HOST_USB2_CAPABLE      (1 << 0)
+#define HOST_USB3_CAPABLE      (1 << 1)
+#define HOST_USB4_CAPABLE      (1 << 2)
 #define DFP_RECEPTACLE         2
 #define DFP_CAPTIVE            3
 
         | ((pnum) & 0x1f))
 
 /*
- * Passive Cable VDO
+ * Cable VDO (for both Passive and Active Cable VDO in PD Rev2.0)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:20> :: Reserved, Shall be set to zero
+ * <19:18> :: Type-C to Type-A/B/C/Captive (00b == A, 01b == B, 10b == C, 11b == Captive)
+ * <17>    :: Reserved, Shall be set to zero
+ * <16:13> :: cable latency (0001 == <10ns (~1m length))
+ * <12:11> :: cable termination type (11b == both ends active VCONN req)
+ * <10>    :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <9>     :: SSTX2 Directionality support
+ * <8>     :: SSRX1 Directionality support
+ * <7>     :: SSRX2 Directionality support
+ * <6:5>   :: Vbus current handling capability (01b == 3A, 10b == 5A)
+ * <4>     :: Vbus through cable (0b == no, 1b == yes)
+ * <3>     :: SOP" controller present? (0b == no, 1b == yes)
+ * <2:0>   :: USB SS Signaling support
+ *
+ * Passive Cable VDO (PD Rev3.0+)
  * ---------
  * <31:28> :: Cable HW version
  * <27:24> :: Cable FW version
  * <4:3>   :: Reserved, Shall be set to zero
  * <2:0>   :: USB highest speed
  *
- * Active Cable VDO 1
+ * Active Cable VDO 1 (PD Rev3.0+)
  * ---------
  * <31:28> :: Cable HW version
  * <27:24> :: Cable FW version
 #define CABLE_VDO_VER1_0       0
 #define CABLE_VDO_VER1_3       3
 
-/* Connector Type */
+/* Connector Type (_ATYPE and _BTYPE are for PD Rev2.0 only) */
+#define CABLE_ATYPE            0
+#define CABLE_BTYPE            1
 #define CABLE_CTYPE            2
 #define CABLE_CAPTIVE          3
 
 #define CABLE_CURR_3A          1
 #define CABLE_CURR_5A          2
 
+/* USB SuperSpeed Signaling Support (PD Rev2.0) */
+#define CABLE_USBSS_U2_ONLY    0
+#define CABLE_USBSS_U31_GEN1   1
+#define CABLE_USBSS_U31_GEN2   2
+
 /* USB Highest Speed */
 #define CABLE_USB2_ONLY                0
 #define CABLE_USB32_GEN1       1
 #define CABLE_USB32_4_GEN2     2
 #define CABLE_USB4_GEN3                3
 
+#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \
+       (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18          \
+        | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 | (tx1d) << 10            \
+        | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5         \
+        | (vps) << 4 | (sopp) << 3 | ((usbss) & 0x7))
 #define VDO_PCABLE(hw, fw, ver, conn, lat, term, vbm, cur, spd)                        \
        (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21          \
         | ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11    \
         | (iso) << 2 | (gen))
 
 /*
+ * AMA VDO (PD Rev2.0)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:12> :: Reserved, Shall be set to zero
+ * <11>    :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <10>    :: SSTX2 Directionality support
+ * <9>     :: SSRX1 Directionality support
+ * <8>     :: SSRX2 Directionality support
+ * <7:5>   :: Vconn power
+ * <4>     :: Vconn power required
+ * <3>     :: Vbus power required
+ * <2:0>   :: USB SS Signaling support
+ */
+#define VDO_AMA(hw, fw, tx1d, tx2d, rx1d, rx2d, vcpwr, vcr, vbr, usbss) \
+       (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24                        \
+        | (tx1d) << 11 | (tx2d) << 10 | (rx1d) << 9 | (rx2d) << 8      \
+        | ((vcpwr) & 0x7) << 5 | (vcr) << 4 | (vbr) << 3               \
+        | ((usbss) & 0x7))
+
+#define PD_VDO_AMA_VCONN_REQ(vdo)      (((vdo) >> 4) & 1)
+#define PD_VDO_AMA_VBUS_REQ(vdo)       (((vdo) >> 3) & 1)
+
+#define AMA_USBSS_U2_ONLY      0
+#define AMA_USBSS_U31_GEN1     1
+#define AMA_USBSS_U31_GEN2     2
+#define AMA_USBSS_BBONLY       3
+
+/*
  * VPD VDO
  * ---------
  * <31:28> :: HW version
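
To make the PD Rev2.0 cable VDO layout above concrete, a standalone sketch
that packs one VDO with a copy of the VDO_CABLE() macro from this hunk and
reads two fields back; the input values are arbitrary and the extraction
shifts are hand-written for the sketch:

#include <stdio.h>

#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \
        (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18          \
         | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 | (tx1d) << 10            \
         | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5         \
         | (vps) << 4 | (sopp) << 3 | ((usbss) & 0x7))

#define CABLE_CTYPE          2
#define CABLE_CURR_3A        1
#define CABLE_USBSS_U31_GEN1 1

int main(void)
{
        unsigned int vdo = VDO_CABLE(1, 2, CABLE_CTYPE, 1, 0, 0, 0, 0, 0,
                                     CABLE_CURR_3A, 1, 0, CABLE_USBSS_U31_GEN1);

        printf("vdo       = 0x%08x\n", vdo);
        printf("plug type = %u\n", (vdo >> 18) & 0x3);  /* <19:18> */
        printf("usbss     = %u\n", vdo & 0x7);          /* <2:0> */
        return 0;
}
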
index f180240..11e555c 100644 (file)
@@ -37,7 +37,6 @@ bool topology_scale_freq_invariant(void);
 enum scale_freq_source {
        SCALE_FREQ_SOURCE_CPUFREQ = 0,
        SCALE_FREQ_SOURCE_ARCH,
-       SCALE_FREQ_SOURCE_CPPC,
 };
 
 struct scale_freq_data {
index c043b8d..183ddd5 100644 (file)
  * must end with any of these keywords:
  *   break;
  *   fallthrough;
+ *   continue;
  *   goto <label>;
  *   return [expression];
  *
index a1e7cab..8f0ec30 100644 (file)
@@ -570,7 +570,7 @@ struct device {
  * @flags: Link flags.
  * @rpm_active: Whether or not the consumer device is runtime-PM-active.
  * @kref: Count repeated addition of the same link.
- * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
+ * @rm_work: Work structure used for removing the link.
  * @supplier_preactivated: Supplier has been made active before consumer probe.
  */
 struct device_link {
@@ -583,9 +583,7 @@ struct device_link {
        u32 flags;
        refcount_t rpm_active;
        struct kref kref;
-#ifdef CONFIG_SRCU
-       struct rcu_head rcu_head;
-#endif
+       struct work_struct rm_work;
        bool supplier_preactivated; /* Owned by consumer probe. */
 };
 
index 8b2b1d6..136b8d9 100644 (file)
@@ -3,6 +3,7 @@
 #define __LINUX_ENTRYKVM_H
 
 #include <linux/entry-common.h>
+#include <linux/tick.h>
 
 /* Transfer to guest mode work */
 #ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
@@ -57,7 +58,7 @@ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
 static inline void xfer_to_guest_mode_prepare(void)
 {
        lockdep_assert_irqs_disabled();
-       rcu_nocb_flush_deferred_wakeup();
+       tick_nohz_user_enter_prepare();
 }
 
 /**
index e030f75..29dbb60 100644 (file)
@@ -401,12 +401,12 @@ struct ethtool_rmon_stats {
  * required information to the driver.
  */
 struct ethtool_module_eeprom {
-       __u32   offset;
-       __u32   length;
-       __u8    page;
-       __u8    bank;
-       __u8    i2c_address;
-       __u8    *data;
+       u32     offset;
+       u32     length;
+       u8      page;
+       u8      bank;
+       u8      i2c_address;
+       u8      *data;
 };
 
 /**
index bad41bc..a16dbec 100644 (file)
@@ -51,6 +51,10 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */
 #define FANOTIFY_INIT_FLAGS    (FANOTIFY_ADMIN_INIT_FLAGS | \
                                 FANOTIFY_USER_INIT_FLAGS)
 
+/* Internal group flags */
+#define FANOTIFY_UNPRIV                0x80000000
+#define FANOTIFY_INTERNAL_GROUP_FLAGS  (FANOTIFY_UNPRIV)
+
 #define FANOTIFY_MARK_TYPE_BITS        (FAN_MARK_INODE | FAN_MARK_MOUNT | \
                                 FAN_MARK_FILESYSTEM)
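
A short sketch of the internal-flag bookkeeping these definitions enable:
FANOTIFY_UNPRIV is ORed into the group's flags at init time and masked back
out before the flags are exposed via fdinfo, as the fanotify_init() and
fdinfo hunks elsewhere in this series do. The UAPI mask and input value
below are stand-ins, not the real FANOTIFY_INIT_FLAGS:

#include <stdio.h>

#define FANOTIFY_UNPRIV           0x80000000u
#define DEMO_FANOTIFY_INIT_FLAGS  0x000001ffu  /* stand-in UAPI mask */

int main(void)
{
        unsigned int uapi = 0x00000018u;             /* flags from userspace */
        unsigned int group = uapi | FANOTIFY_UNPRIV; /* stored on the group */

        printf("group flags : 0x%08x\n", group);
        printf("fdinfo view : 0x%08x\n", group & DEMO_FANOTIFY_INIT_FLAGS);
        printf("unprivileged: %d\n", !!(group & FANOTIFY_UNPRIV));
        return 0;
}
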
 
index a8dccd2..ecfbcc0 100644 (file)
@@ -659,6 +659,9 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
 /* drivers/video/fb_defio.c */
 int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
 extern void fb_deferred_io_init(struct fb_info *info);
+extern void fb_deferred_io_open(struct fb_info *info,
+                               struct inode *inode,
+                               struct file *file);
 extern void fb_deferred_io_cleanup(struct fb_info *info);
 extern int fb_deferred_io_fsync(struct file *file, loff_t start,
                                loff_t end, int datasync);
index 271021e..10e922c 100644 (file)
@@ -1167,8 +1167,7 @@ static inline void hid_hw_wait(struct hid_device *hdev)
  */
 static inline u32 hid_report_len(struct hid_report *report)
 {
-       /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
-       return ((report->size - 1) >> 3) + 1 + (report->id > 0);
+       return DIV_ROUND_UP(report->size, 8) + (report->id > 0);
 }
 
 int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
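
The open-coded expression being removed and the DIV_ROUND_UP() form compute
the same byte length for every report size of at least one bit; a quick
standalone check (DIV_ROUND_UP expanded by hand, sizes arbitrary):

#include <assert.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int size;

        /* equivalence holds for size >= 1 bit */
        for (size = 1; size <= 4096; size++)
                assert(((size - 1) >> 3) + 1 == DIV_ROUND_UP(size, 8));

        /* e.g. a 9-bit report with a report ID: 2 data bytes + 1 ID byte */
        printf("len = %u\n", DIV_ROUND_UP(9u, 8u) + 1);
        return 0;
}
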
index 232e1bd..9b0487c 100644 (file)
@@ -332,12 +332,30 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
 int host1x_device_init(struct host1x_device *device);
 int host1x_device_exit(struct host1x_device *device);
 
-int __host1x_client_register(struct host1x_client *client,
-                            struct lock_class_key *key);
-#define host1x_client_register(class) \
-       ({ \
-               static struct lock_class_key __key; \
-               __host1x_client_register(class, &__key); \
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
+void host1x_client_exit(struct host1x_client *client);
+
+#define host1x_client_init(client)                     \
+       ({                                              \
+               static struct lock_class_key __key;     \
+               __host1x_client_init(client, &__key);   \
+       })
+
+int __host1x_client_register(struct host1x_client *client);
+
+/*
+ * Note that this wrapper calls __host1x_client_init() for compatibility
+ * with existing callers. Callers that want to separately initialize and
+ * register a host1x client must first initialize using either
+ * __host1x_client_init() or host1x_client_init() and then use
+ * the low-level __host1x_client_register() function to avoid the client
+ * getting reinitialized.
+ */
+#define host1x_client_register(client)                 \
+       ({                                              \
+               static struct lock_class_key __key;     \
+               __host1x_client_init(client, &__key);   \
+               __host1x_client_register(client);       \
        })
 
 int host1x_client_unregister(struct host1x_client *client);
index 9626fda..2a8ebe6 100644 (file)
@@ -286,6 +286,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
 
 extern struct page *huge_zero_page;
+extern unsigned long huge_zero_pfn;
 
 static inline bool is_huge_zero_page(struct page *page)
 {
@@ -294,7 +295,7 @@ static inline bool is_huge_zero_page(struct page *page)
 
 static inline bool is_huge_zero_pmd(pmd_t pmd)
 {
-       return is_huge_zero_page(pmd_page(pmd));
+       return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
 }
 
 static inline bool is_huge_zero_pud(pud_t pud)
@@ -440,6 +441,11 @@ static inline bool is_huge_zero_page(struct page *page)
        return false;
 }
 
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+       return false;
+}
+
 static inline bool is_huge_zero_pud(pud_t pud)
 {
        return false;
index b92f25c..6504346 100644 (file)
@@ -149,6 +149,7 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                                                long freed);
 bool isolate_huge_page(struct page *page, struct list_head *list);
+int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
 void putback_active_hugepage(struct page *page);
 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
 void free_huge_page(struct page *page);
@@ -339,6 +340,11 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list)
        return false;
 }
 
+static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
+{
+       return 0;
+}
+
 static inline void putback_active_hugepage(struct page *page)
 {
 }
@@ -604,6 +610,8 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
                                unsigned long address);
 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
                        pgoff_t idx);
+void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
+                               unsigned long address, struct page *page);
 
 /* arch callback */
 int __init __alloc_bootmem_huge_page(struct hstate *h);
index 2967437..a673007 100644 (file)
@@ -9,7 +9,7 @@
  * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
  * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (c) 2018 - 2020 Intel Corporation
+ * Copyright (c) 2018 - 2021 Intel Corporation
  */
 
 #ifndef LINUX_IEEE80211_H
@@ -2179,6 +2179,8 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
 #define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED             0xc0
 #define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK                 0xc0
 
+#define IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF                      0x01
+
 /* 802.11ax HE TX/RX MCS NSS Support  */
 #define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS                   (3)
 #define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS                     (6)
@@ -2933,6 +2935,7 @@ enum ieee80211_category {
        WLAN_CATEGORY_BACK = 3,
        WLAN_CATEGORY_PUBLIC = 4,
        WLAN_CATEGORY_RADIO_MEASUREMENT = 5,
+       WLAN_CATEGORY_FAST_BBS_TRANSITION = 6,
        WLAN_CATEGORY_HT = 7,
        WLAN_CATEGORY_SA_QUERY = 8,
        WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9,
@@ -3110,6 +3113,11 @@ enum ieee80211_tdls_actioncode {
  */
 #define WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT     BIT(6)
 
+/* Timing Measurement protocol for time sync is set in the 7th bit of 3rd byte
+ * of the @WLAN_EID_EXT_CAPABILITY information element
+ */
+#define WLAN_EXT_CAPA3_TIMING_MEASUREMENT_SUPPORT      BIT(7)
+
 /* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */
 #define WLAN_EXT_CAPA4_TDLS_BUFFER_STA         BIT(4)
 #define WLAN_EXT_CAPA4_TDLS_PEER_PSM           BIT(5)
index bf5c5f3..b712217 100644 (file)
@@ -48,6 +48,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
        case ARPHRD_TUNNEL6:
        case ARPHRD_SIT:
        case ARPHRD_IPGRE:
+       case ARPHRD_IP6GRE:
        case ARPHRD_VOID:
        case ARPHRD_NONE:
        case ARPHRD_RAWIP:
index 12e9a32..b651c5e 100644 (file)
@@ -71,7 +71,8 @@ bool br_multicast_has_router_adjacent(struct net_device *dev, int proto);
 bool br_multicast_enabled(const struct net_device *dev);
 bool br_multicast_router(const struct net_device *dev);
 int br_mdb_replay(struct net_device *br_dev, struct net_device *dev,
-                 struct notifier_block *nb, struct netlink_ext_ack *extack);
+                 const void *ctx, bool adding, struct notifier_block *nb,
+                 struct netlink_ext_ack *extack);
 #else
 static inline int br_multicast_list_adjacent(struct net_device *dev,
                                             struct list_head *br_ip_list)
@@ -103,9 +104,9 @@ static inline bool br_multicast_router(const struct net_device *dev)
 {
        return false;
 }
-static inline int br_mdb_replay(struct net_device *br_dev,
-                               struct net_device *dev,
-                               struct notifier_block *nb,
+static inline int br_mdb_replay(const struct net_device *br_dev,
+                               const struct net_device *dev, const void *ctx,
+                               bool adding, struct notifier_block *nb,
                                struct netlink_ext_ack *extack)
 {
        return -EOPNOTSUPP;
@@ -120,7 +121,8 @@ int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto);
 int br_vlan_get_info(const struct net_device *dev, u16 vid,
                     struct bridge_vlan_info *p_vinfo);
 int br_vlan_replay(struct net_device *br_dev, struct net_device *dev,
-                  struct notifier_block *nb, struct netlink_ext_ack *extack);
+                  const void *ctx, bool adding, struct notifier_block *nb,
+                  struct netlink_ext_ack *extack);
 #else
 static inline bool br_vlan_enabled(const struct net_device *dev)
 {
@@ -149,8 +151,8 @@ static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
 }
 
 static inline int br_vlan_replay(struct net_device *br_dev,
-                                struct net_device *dev,
-                                struct notifier_block *nb,
+                                struct net_device *dev, const void *ctx,
+                                bool adding, struct notifier_block *nb,
                                 struct netlink_ext_ack *extack)
 {
        return -EOPNOTSUPP;
@@ -164,9 +166,9 @@ struct net_device *br_fdb_find_port(const struct net_device *br_dev,
 void br_fdb_clear_offload(const struct net_device *dev, u16 vid);
 bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag);
 u8 br_port_get_stp_state(const struct net_device *dev);
-clock_t br_get_ageing_time(struct net_device *br_dev);
-int br_fdb_replay(struct net_device *br_dev, struct net_device *dev,
-                 struct notifier_block *nb);
+clock_t br_get_ageing_time(const struct net_device *br_dev);
+int br_fdb_replay(const struct net_device *br_dev, const struct net_device *dev,
+                 const void *ctx, bool adding, struct notifier_block *nb);
 #else
 static inline struct net_device *
 br_fdb_find_port(const struct net_device *br_dev,
@@ -191,14 +193,14 @@ static inline u8 br_port_get_stp_state(const struct net_device *dev)
        return BR_STATE_DISABLED;
 }
 
-static inline clock_t br_get_ageing_time(struct net_device *br_dev)
+static inline clock_t br_get_ageing_time(const struct net_device *br_dev)
 {
        return 0;
 }
 
-static inline int br_fdb_replay(struct net_device *br_dev,
-                               struct net_device *dev,
-                               struct notifier_block *nb)
+static inline int br_fdb_replay(const struct net_device *br_dev,
+                               const struct net_device *dev, const void *ctx,
+                               bool adding, struct notifier_block *nb)
 {
        return -EOPNOTSUPP;
 }
index 045ad16..d82b4b2 100644 (file)
@@ -242,7 +242,8 @@ extern bool initcall_debug;
        asm(".section   \"" __sec "\", \"a\"            \n"     \
            __stringify(__name) ":                      \n"     \
            ".long      " __stringify(__stub) " - .     \n"     \
-           ".previous                                  \n");
+           ".previous                                  \n");   \
+       static_assert(__same_type(initcall_t, &fn));
 #else
 #define ____define_initcall(fn, __unused, __name, __sec)       \
        static initcall_t __name __used                         \
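
A userspace sketch of the type check added above: the kernel's __same_type()
wraps __builtin_types_compatible_p() (GCC/Clang), so a stub whose signature
drifts from initcall_t now fails at compile time instead of misbehaving at
boot. Names below are illustrative:

#include <assert.h>

typedef int (*initcall_t)(void);

static int good_init(void) { return 0; }

static_assert(__builtin_types_compatible_p(initcall_t, __typeof__(&good_init)),
              "good_init does not match initcall_t");

int main(void) { return good_init(); }
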
index 2f34487..8583ed3 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/spinlock.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
+#include <linux/sched/stat.h>
 #include <linux/bug.h>
 #include <linux/minmax.h>
 #include <linux/mm.h>
@@ -146,7 +147,7 @@ static inline bool is_error_page(struct page *page)
  */
 #define KVM_REQ_TLB_FLUSH         (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_MMU_RELOAD        (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_PENDING_TIMER     2
+#define KVM_REQ_UNBLOCK           2
 #define KVM_REQ_UNHALT            3
 #define KVM_REQUEST_ARCH_BASE     8
 
@@ -265,6 +266,11 @@ static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
        return !!map->hva;
 }
 
+static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
+{
+       return single_task_running() && !need_resched() && ktime_before(cur, stop);
+}
+
 /*
  * Sometimes a large or cross-page mmio needs to be broken up into separate
  * exits for userspace servicing.
@@ -1179,7 +1185,15 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
 static inline unsigned long
 __gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
 {
-       return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+        * The index was originally checked in search_memslots.  To prevent
+        * a malicious guest from building a Spectre gadget out of e.g. page
+        * table walks, do not let the processor speculate loads outside
+        * the guest's registered memslots.
+        * the guest's registered memslots.
+        */
+       unsigned long offset = gfn - slot->base_gfn;
+       offset = array_index_nospec(offset, slot->npages);
+       return slot->userspace_addr + offset * PAGE_SIZE;
 }
 
 static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
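
A hedged model of the clamp used above: array_index_nospec() evaluates to
the index when it is below the bound and to 0 otherwise, using an
all-ones/all-zeros mask rather than a predictable branch (this is a
simplified portable model, not the kernel primitive's per-arch
implementation):

#include <stdio.h>

static unsigned long index_nospec(unsigned long index, unsigned long size)
{
        /* mask is ~0UL when index < size, 0UL otherwise */
        unsigned long mask = (index < size) ? ~0UL : 0UL;

        return index & mask;
}

int main(void)
{
        unsigned long npages = 16;

        printf("in range: %lu\n", index_nospec(5, npages));   /* 5 */
        printf("clamped : %lu\n", index_nospec(99, npages));  /* 0 */
        return 0;
}
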
index a57af87..4a59664 100644 (file)
@@ -26,9 +26,7 @@ struct bd70528_data {
        struct mutex rtc_timer_lock;
 };
 
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_BUCK_VOLTS 17
+#define BD70528_BUCK_VOLTS 0x10
 #define BD70528_LDO_VOLTS 0x20
 
 #define BD70528_REG_BUCK1_EN   0x0F
index c7ab69c..3b5f3a7 100644 (file)
@@ -26,11 +26,11 @@ enum {
        BD71828_REGULATOR_AMOUNT,
 };
 
-#define BD71828_BUCK1267_VOLTS         0xEF
-#define BD71828_BUCK3_VOLTS            0x10
-#define BD71828_BUCK4_VOLTS            0x20
-#define BD71828_BUCK5_VOLTS            0x10
-#define BD71828_LDO_VOLTS              0x32
+#define BD71828_BUCK1267_VOLTS         0x100
+#define BD71828_BUCK3_VOLTS            0x20
+#define BD71828_BUCK4_VOLTS            0x40
+#define BD71828_BUCK5_VOLTS            0x20
+#define BD71828_LDO_VOLTS              0x40
 /* LDO6 is fixed 1.8V voltage */
 #define BD71828_LDO_6_VOLTAGE          1800000
 
index 236a7d0..30bb59f 100644 (file)
@@ -630,6 +630,7 @@ struct mlx4_caps {
        bool                    wol_port[MLX4_MAX_PORTS + 1];
        struct mlx4_rate_limit_caps rl_caps;
        u32                     health_buffer_addrs;
+       bool                    map_clock_to_user;
 };
 
 struct mlx4_buf_list {
index f90f840..1efe374 100644 (file)
@@ -542,6 +542,10 @@ struct mlx5_core_roce {
 enum {
        MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0,
        MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1,
+       /* Set during device detach to block any further devices
+        * creation/deletion on drivers rescan. Unset during device attach.
+        */
+       MLX5_PRIV_FLAGS_DETACH = 1 << 2,
 };
 
 struct mlx5_adev {
index 2d1ed78..e32a0d6 100644 (file)
@@ -11083,6 +11083,11 @@ struct mlx5_ifc_create_sampler_obj_in_bits {
        struct mlx5_ifc_sampler_obj_bits sampler_object;
 };
 
+struct mlx5_ifc_query_sampler_obj_out_bits {
+       struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+       struct mlx5_ifc_sampler_obj_bits sampler_object;
+};
+
 enum {
        MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 = 0x0,
        MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256 = 0x1,
index 028f442..60ffeb6 100644 (file)
@@ -85,4 +85,5 @@ mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
                         struct mlx5_hairpin_params *params);
 
 void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
+void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp);
 #endif /* __TRANSOBJ_H__ */
index a0434e8..6cf4c68 100644 (file)
@@ -1720,6 +1720,7 @@ struct zap_details {
        struct address_space *check_mapping;    /* Check page->mapping if set */
        pgoff_t first_index;                    /* Lowest page->index to unmap */
        pgoff_t last_index;                     /* Highest page->index to unmap */
+       struct page *single_page;               /* Locked page to be unmapped */
 };
 
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
@@ -1767,6 +1768,7 @@ extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
 extern int fixup_user_fault(struct mm_struct *mm,
                            unsigned long address, unsigned int fault_flags,
                            bool *unlocked);
+void unmap_mapping_page(struct page *page);
 void unmap_mapping_pages(struct address_space *mapping,
                pgoff_t start, pgoff_t nr, bool even_cows);
 void unmap_mapping_range(struct address_space *mapping,
@@ -1787,6 +1789,7 @@ static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
        BUG();
        return -EFAULT;
 }
+static inline void unmap_mapping_page(struct page *page) { }
 static inline void unmap_mapping_pages(struct address_space *mapping,
                pgoff_t start, pgoff_t nr, bool even_cows) { }
 static inline void unmap_mapping_range(struct address_space *mapping,
index ed6862e..862f88a 100644 (file)
@@ -452,13 +452,6 @@ struct mm_struct {
                 */
                atomic_t has_pinned;
 
-               /**
-                * @write_protect_seq: Locked when any thread is write
-                * protecting pages mapped by this mm to enforce a later COW,
-                * for instance during page table copying for fork().
-                */
-               seqcount_t write_protect_seq;
-
 #ifdef CONFIG_MMU
                atomic_long_t pgtables_bytes;   /* PTE page table pages */
 #endif
@@ -467,6 +460,18 @@ struct mm_struct {
                spinlock_t page_table_lock; /* Protects page tables and some
                                             * counters
                                             */
+               /*
+                * With some kernel configs the mmap_lock's offset inside
+                * 'mm_struct' is 0x120, which works well: its two hot
+                * fields, 'count' and 'owner', sit in two different
+                * cachelines. When mmap_lock is highly contended, both
+                * fields are accessed frequently, so the current layout
+                * helps to reduce cache bouncing.
+                *
+                * So please be careful about adding new fields before
+                * mmap_lock, which can easily push the two fields into one
+                * cacheline.
+                */
                struct rw_semaphore mmap_lock;
 
                struct list_head mmlist; /* List of maybe swapped mm's. These
@@ -487,7 +492,15 @@ struct mm_struct {
                unsigned long stack_vm;    /* VM_STACK */
                unsigned long def_flags;
 
+               /**
+                * @write_protect_seq: Locked when any thread is write
+                * protecting pages mapped by this mm to enforce a later COW,
+                * for instance during page table copying for fork().
+                */
+               seqcount_t write_protect_seq;
+
                spinlock_t arg_lock; /* protect the below fields */
+
                unsigned long start_code, end_code, start_data, end_data;
                unsigned long start_brk, brk, start_stack;
                unsigned long arg_start, arg_end, env_start, env_end;
index 7d45b5f..8e291cf 100644 (file)
@@ -447,6 +447,7 @@ struct hv_vmbus_device_id {
 
 struct rpmsg_device_id {
        char name[RPMSG_NAME_SIZE];
+       kernel_ulong_t driver_data;
 };
 
 /* i2c */
index c20211e..2430650 100644 (file)
@@ -2344,6 +2344,7 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
 struct device_node;
 struct irq_domain;
 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
+bool pci_host_of_has_msi_map(struct device *dev);
 
 /* Arch may override this (weak) */
 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -2351,6 +2352,7 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
 #else  /* CONFIG_OF */
 static inline struct irq_domain *
 pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
+static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
 #endif  /* CONFIG_OF */
 
 static inline struct device_node *
index 46b1378..a43047b 100644 (file)
@@ -432,6 +432,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
  * Unlike the macro pte_mkyoung, this macro is used on platforms
  * where software maintains the page access bit.
  */
+#ifndef pte_sw_mkyoung
+static inline pte_t pte_sw_mkyoung(pte_t pte)
+{
+       return pte;
+}
+#define pte_sw_mkyoung pte_sw_mkyoung
+#endif
+
 #ifndef pte_savedwrite
 #define pte_savedwrite pte_write
 #endif
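
The #ifndef/#define pairing above is the usual weak-default idiom: the generic
no-op applies unless the architecture defines its own helper first. A minimal
sketch of such an override, assuming an architecture whose pte_mkyoung sets a
software-managed access bit (placement and body are illustrative, not from the
patch):

    /* hypothetical arch/<arch>/include/asm/pgtable.h, seen before the generic header */
    static inline pte_t pte_sw_mkyoung(pte_t pte)
    {
            return pte_mkyoung(pte);        /* software-maintained access bit */
    }
    #define pte_sw_mkyoung pte_sw_mkyoung   /* suppresses the generic no-op */
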
index fafc1be..9837fb0 100644 (file)
@@ -50,6 +50,7 @@ struct sysc_regbits {
        s8 emufree_shift;
 };
 
+#define SYSC_QUIRK_REINIT_ON_RESUME    BIT(27)
 #define SYSC_QUIRK_GPMC_DEBUG          BIT(26)
 #define SYSC_MODULE_QUIRK_ENA_RESETDONE        BIT(25)
 #define SYSC_MODULE_QUIRK_PRUSS                BIT(24)
index a311bdd..aba237c 100644 (file)
@@ -191,7 +191,7 @@ struct ptp_clock_event {
  *
  * @ppm:    Parts per million, but with a 16 bit binary fractional field
  */
-static inline s32 scaled_ppm_to_ppb(long ppm)
+static inline long scaled_ppm_to_ppb(long ppm)
 {
        /*
         * The 'freq' field in the 'struct timex' is in parts per
@@ -209,7 +209,7 @@ static inline s32 scaled_ppm_to_ppb(long ppm)
 
        ppb *= 125;
        ppb >>= 13;
-       return (s32)ppb;
+       return (long)ppb;
 }
 
 #if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
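
A quick check of the fixed-point math above (an illustration, not part of the
patch): 1 ppm corresponds to a scaled-ppm value of 65536, and 65536 * 125 >> 13
is exactly 1000, i.e. 1000 ppb. Returning long instead of s32 avoids truncating
the result for large adjustments, since the scaled-ppm input is itself a long:

    long ppb = scaled_ppm_to_ppb(65536);    /* 65536 * 125 >> 13 == 1000 ppb == 1 ppm */
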
index def5c62..8d04e7d 100644 (file)
@@ -91,6 +91,7 @@ enum ttu_flags {
 
        TTU_SPLIT_HUGE_PMD      = 0x4,  /* split huge PMD if any */
        TTU_IGNORE_MLOCK        = 0x8,  /* ignore mlock */
+       TTU_SYNC                = 0x10, /* avoid racy checks with PVMW_SYNC */
        TTU_IGNORE_HWPOISON     = 0x20, /* corrupted page is recoverable */
        TTU_BATCH_FLUSH         = 0x40, /* Batch TLB flushes where possible
                                         * and caller guarantees they will
index 6f155f9..4ab7bfc 100644 (file)
@@ -1109,6 +1109,7 @@ struct pcr_ops {
 };
 
 enum PDEV_STAT  {PDEV_STAT_IDLE, PDEV_STAT_RUN};
+enum ASPM_MODE  {ASPM_MODE_CFG, ASPM_MODE_REG};
 
 #define ASPM_L1_1_EN                   BIT(0)
 #define ASPM_L1_2_EN                   BIT(1)
@@ -1234,6 +1235,7 @@ struct rtsx_pcr {
        u8                              card_drive_sel;
 #define ASPM_L1_EN                     0x02
        u8                              aspm_en;
+       enum ASPM_MODE                  aspm_mode;
        bool                            aspm_enabled;
 
 #define PCR_MS_PMOS                    (1 << 0)
index d2c8813..28a98fc 100644 (file)
@@ -350,11 +350,19 @@ struct load_weight {
  * Only for tasks we track a moving average of the past instantaneous
  * estimated utilization. This allows absorbing sporadic drops in utilization
  * of an otherwise almost periodic task.
+ *
+ * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
+ * updates. When a task is dequeued, its util_est should not be updated if its
+ * util_avg has not been updated in the meantime.
+ * This information is mapped into the MSB of util_est.enqueued at dequeue
+ * time. Since the max value of util_est.enqueued for a task is 1024 (the PELT
+ * util_avg upper bound for a task), it is safe to use the MSB.
  */
 struct util_est {
        unsigned int                    enqueued;
        unsigned int                    ewma;
 #define UTIL_EST_WEIGHT_SHIFT          2
+#define UTIL_AVG_UNCHANGED             0x80000000
 } __attribute__((__aligned__(sizeof(u64))));
 
 /*
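
Because the flag occupies the MSB of util_est.enqueued, any reader that wants
the real enqueued utilization has to mask it out first. A minimal sketch (the
exact access path through the task struct is an assumption, not from this
hunk):

    /* read a task's util_est with the synchronization flag stripped */
    unsigned int enqueued = READ_ONCE(p->se.avg.util_est.enqueued);
    unsigned int util = enqueued & ~UTIL_AVG_UNCHANGED;     /* drop the MSB flag */
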
index bb19265..a86e852 100644 (file)
@@ -98,6 +98,7 @@ enum sctp_cid {
        SCTP_CID_I_FWD_TSN              = 0xC2,
        SCTP_CID_ASCONF_ACK             = 0x80,
        SCTP_CID_RECONF                 = 0x82,
+       SCTP_CID_PAD                    = 0x84,
 }; /* enum */
 
 
@@ -410,6 +411,12 @@ struct sctp_heartbeat_chunk {
 };
 
 
+/* A PAD chunk can be bundled with a heartbeat chunk to probe the PMTU */
+struct sctp_pad_chunk {
+       struct sctp_chunkhdr uh;
+};
+
+
 /* For the abort and shutdown ACK we must carry the init tag in the
  * common header. Just the common header is all that is needed with a
  * chunk descriptor.
index b8fc5c5..0d8e3dc 100644 (file)
@@ -438,6 +438,4 @@ extern int __sys_socketpair(int family, int type, int protocol,
                            int __user *usockvec);
 extern int __sys_shutdown_sock(struct socket *sock, int how);
 extern int __sys_shutdown(int fd, int how);
-
-extern struct ns_common *get_net_ns(struct ns_common *ns);
 #endif /* _LINUX_SOCKET_H */
index d81fe8b..61b622e 100644 (file)
@@ -368,6 +368,8 @@ struct rpc_xprt *   xprt_alloc(struct net *net, size_t size,
                                unsigned int num_prealloc,
                                unsigned int max_req);
 void                   xprt_free(struct rpc_xprt *);
+void                   xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task);
+bool                   xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req);
 
 static inline int
 xprt_enable_swap(struct rpc_xprt *xprt)
index d9b7c91..6430a94 100644 (file)
 #define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
 #define SWP_OFFSET_MASK        ((1UL << SWP_TYPE_SHIFT) - 1)
 
+/* Clear all flags, keeping only the swp_entry_t related information */
+static inline pte_t pte_swp_clear_flags(pte_t pte)
+{
+       if (pte_swp_soft_dirty(pte))
+               pte = pte_swp_clear_soft_dirty(pte);
+       if (pte_swp_uffd_wp(pte))
+               pte = pte_swp_clear_uffd_wp(pte);
+       return pte;
+}
+
 /*
  * Store a type+offset into a swp_entry_t in an arch-independent format
  */
@@ -66,10 +76,7 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
 {
        swp_entry_t arch_entry;
 
-       if (pte_swp_soft_dirty(pte))
-               pte = pte_swp_clear_soft_dirty(pte);
-       if (pte_swp_uffd_wp(pte))
-               pte = pte_swp_clear_uffd_wp(pte);
+       pte = pte_swp_clear_flags(pte);
        arch_entry = __pte_to_swp_entry(pte);
        return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
 }
index 7340613..1a0ff88 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/context_tracking_state.h>
 #include <linux/cpumask.h>
 #include <linux/sched.h>
+#include <linux/rcupdate.h>
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 extern void __init tick_init(void);
@@ -300,4 +301,10 @@ static inline void tick_nohz_task_switch(void)
                __tick_nohz_task_switch();
 }
 
+static inline void tick_nohz_user_enter_prepare(void)
+{
+       if (tick_nohz_full_cpu(smp_processor_id()))
+               rcu_nocb_flush_deferred_wakeup();
+}
+
 #endif
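
The helper is meant to run on the kernel-to-user transition path so that
nohz_full CPUs flush any deferred RCU wakeups before the tick is stopped. A
hypothetical call site (the function name is illustrative; the real hooks live
in the arch entry code):

    static void prepare_return_to_user(void)
    {
            tick_nohz_user_enter_prepare(); /* no-op unless this is a nohz_full CPU */
    }
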
index bf00259..96b7ff6 100644 (file)
@@ -460,7 +460,7 @@ static inline unsigned int rdo_max_power(u32 rdo)
 #define PD_T_RECEIVER_RESPONSE 15      /* 15ms max */
 #define PD_T_SOURCE_ACTIVITY   45
 #define PD_T_SINK_ACTIVITY     135
-#define PD_T_SINK_WAIT_CAP     240
+#define PD_T_SINK_WAIT_CAP     310     /* 310 - 620 ms */
 #define PD_T_PS_TRANSITION     500
 #define PD_T_SRC_TRANSITION    35
 #define PD_T_DRP_SNK           40
index 0eb83ce..b517ebc 100644 (file)
@@ -24,8 +24,4 @@ enum usb_pd_ext_sdb_fields {
 #define USB_PD_EXT_SDB_EVENT_OVP               BIT(3)
 #define USB_PD_EXT_SDB_EVENT_CF_CV_MODE                BIT(4)
 
-#define USB_PD_EXT_SDB_PPS_EVENTS      (USB_PD_EXT_SDB_EVENT_OCP |     \
-                                        USB_PD_EXT_SDB_EVENT_OTP |     \
-                                        USB_PD_EXT_SDB_EVENT_OVP)
-
 #endif /* __LINUX_USB_PD_EXT_SDB_H */
index 430a3a0..9fac819 100644 (file)
@@ -6,8 +6,10 @@
 
 #include <linux/device.h>
 #include <linux/kernel.h>
+#include <linux/poll.h>
 #include <linux/skbuff.h>
 #include <linux/netlink.h>
+#include <linux/netdevice.h>
 
 /**
  * enum wwan_port_type - WWAN port types
@@ -40,15 +42,23 @@ struct wwan_port;
 /** struct wwan_port_ops - The WWAN port operations
  * @start: The routine for starting the WWAN port device.
  * @stop: The routine for stopping the WWAN port device.
- * @tx: The routine that sends WWAN port protocol data to the device.
+ * @tx: Non-blocking routine that sends WWAN port protocol data to the device.
+ * @tx_blocking: Optional blocking routine that sends WWAN port protocol data
+ *               to the device.
+ * @tx_poll: Optional routine that sets additional TX poll flags.
  *
  * The wwan_port_ops structure contains a list of low-level operations
- * that control a WWAN port device. All functions are mandatory.
+ * that control a WWAN port device. All functions are mandatory unless marked optional.
  */
 struct wwan_port_ops {
        int (*start)(struct wwan_port *port);
        void (*stop)(struct wwan_port *port);
        int (*tx)(struct wwan_port *port, struct sk_buff *skb);
+
+       /* Optional operations */
+       int (*tx_blocking)(struct wwan_port *port, struct sk_buff *skb);
+       __poll_t (*tx_poll)(struct wwan_port *port, struct file *filp,
+                           poll_table *wait);
 };
 
 /**
@@ -118,15 +128,36 @@ void wwan_port_txon(struct wwan_port *port);
 void *wwan_port_get_drvdata(struct wwan_port *port);
 
 /**
+ * struct wwan_netdev_priv - WWAN core network device private data
+ * @link_id: WWAN device data link id
+ * @drv_priv: driver private data area, its size is determined by &wwan_ops
+ */
+struct wwan_netdev_priv {
+       u32 link_id;
+
+       /* must be last */
+       u8 drv_priv[] __aligned(sizeof(void *));
+};
+
+static inline void *wwan_netdev_drvpriv(struct net_device *dev)
+{
+       return ((struct wwan_netdev_priv *)netdev_priv(dev))->drv_priv;
+}
+
+/*
+ * Used to indicate that the WWAN core should not create a default network
+ * link.
+ */
+#define WWAN_NO_DEFAULT_LINK           U32_MAX
+
+/**
  * struct wwan_ops - WWAN device ops
- * @owner: module owner of the WWAN ops
  * @priv_size: size of private netdev data area
  * @setup: set up a new netdev
  * @newlink: register the new netdev
  * @dellink: remove the given netdev
  */
 struct wwan_ops {
-       struct module *owner;
        unsigned int priv_size;
        void (*setup)(struct net_device *dev);
        int (*newlink)(void *ctxt, struct net_device *dev,
@@ -136,7 +167,7 @@ struct wwan_ops {
 };
 
 int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
-                     void *ctxt);
+                     void *ctxt, u32 def_link_id);
 
 void wwan_unregister_ops(struct device *parent);
 
index 58c2cd4..161cdf7 100644 (file)
@@ -7,7 +7,7 @@
  * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014 Intel Mobile Communications GmbH
  * Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 
 #include <linux/ethtool.h>
@@ -22,6 +22,7 @@
 #include <linux/if_ether.h>
 #include <linux/ieee80211.h>
 #include <linux/net.h>
+#include <linux/rfkill.h>
 #include <net/regulatory.h>
 
 /**
@@ -370,11 +371,18 @@ struct ieee80211_sta_he_cap {
  * @he_cap: holds the HE capabilities
  * @he_6ghz_capa: HE 6 GHz capabilities, must be filled in for a
  *     6 GHz band channel (and 0 may be a valid value).
+ * @vendor_elems: vendor element(s) to advertise
+ * @vendor_elems.data: vendor element(s) data
+ * @vendor_elems.len: vendor element(s) length
  */
 struct ieee80211_sband_iftype_data {
        u16 types_mask;
        struct ieee80211_sta_he_cap he_cap;
        struct ieee80211_he_6ghz_capa he_6ghz_capa;
+       struct {
+               const u8 *data;
+               unsigned int len;
+       } vendor_elems;
 };
 
 /**
@@ -534,18 +542,6 @@ ieee80211_get_he_iftype_cap(const struct ieee80211_supported_band *sband,
 }
 
 /**
- * ieee80211_get_he_sta_cap - return HE capabilities for an sband's STA
- * @sband: the sband to search for the STA on
- *
- * Return: pointer to the struct ieee80211_sta_he_cap, or NULL is none found
- */
-static inline const struct ieee80211_sta_he_cap *
-ieee80211_get_he_sta_cap(const struct ieee80211_supported_band *sband)
-{
-       return ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_STATION);
-}
-
-/**
  * ieee80211_get_he_6ghz_capa - return HE 6 GHz capabilities
  * @sband: the sband to search for the STA on
  * @iftype: the iftype to search for
@@ -906,6 +902,17 @@ ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef)
 }
 
 /**
+ * cfg80211_any_usable_channels - check for usable channels
+ * @wiphy: the wiphy to check for
+ * @band_mask: which bands to check on
+ * @prohibited_flags: which channels not to consider usable;
+ *     %IEEE80211_CHAN_DISABLED is always taken into account
+ */
+bool cfg80211_any_usable_channels(struct wiphy *wiphy,
+                                 unsigned long band_mask,
+                                 u32 prohibited_flags);
+
+/**
  * enum survey_info_flags - survey information flags
  *
  * @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in
@@ -1245,8 +1252,6 @@ struct cfg80211_csa_settings {
        u8 count;
 };
 
-#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
-
 /**
  * struct iface_combination_params - input parameters for interface combinations
  *
@@ -3522,7 +3527,10 @@ struct cfg80211_pmsr_result {
  *              If neither @trigger_based nor @non_trigger_based is set,
  *              EDCA based ranging will be used.
  * @lmr_feedback: negotiate for I2R LMR feedback. Only valid if either
- *     @trigger_based or @non_trigger_based is set.
+ *              @trigger_based or @non_trigger_based is set.
+ * @bss_color: the bss color of the responder. Optional. Set to zero to
+ *     indicate the driver should set the BSS color. Only valid if
+ *     @non_trigger_based or @trigger_based is set.
  *
  * See also nl80211 for the respective attribute documentation.
  */
@@ -3540,6 +3548,7 @@ struct cfg80211_pmsr_ftm_request_peer {
        u8 burst_duration;
        u8 ftms_per_burst;
        u8 ftmr_retries;
+       u8 bss_color;
 };
 
 /**
@@ -4945,6 +4954,7 @@ struct wiphy_iftype_akm_suites {
  *     configuration through the %NL80211_TID_CONFIG_ATTR_RETRY_SHORT and
  *     %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes
  * @sar_capa: SAR control capabilities
+ * @rfkill: a pointer to the rfkill structure
  */
 struct wiphy {
        struct mutex mtx;
@@ -5087,6 +5097,8 @@ struct wiphy {
 
        const struct cfg80211_sar_capa *sar_capa;
 
+       struct rfkill *rfkill;
+
        char priv[] __aligned(NETDEV_ALIGN);
 };
 
@@ -6661,7 +6673,10 @@ void wiphy_rfkill_start_polling(struct wiphy *wiphy);
  * wiphy_rfkill_stop_polling - stop polling rfkill
  * @wiphy: the wiphy
  */
-void wiphy_rfkill_stop_polling(struct wiphy *wiphy);
+static inline void wiphy_rfkill_stop_polling(struct wiphy *wiphy)
+{
+       rfkill_pause_polling(wiphy->rfkill);
+}
 
 /**
  * DOC: Vendor commands
@@ -8154,6 +8169,8 @@ bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
        dev_notice(&(wiphy)->dev, format, ##args)
 #define wiphy_info(wiphy, format, args...)                     \
        dev_info(&(wiphy)->dev, format, ##args)
+#define wiphy_info_once(wiphy, format, args...)                        \
+       dev_info_once(&(wiphy)->dev, format, ##args)
 
 #define wiphy_err_ratelimited(wiphy, format, args...)          \
        dev_err_ratelimited(&(wiphy)->dev, format, ##args)
index 289d68e..ea47783 100644 (file)
@@ -409,6 +409,21 @@ static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
        return NULL;
 }
 
+static inline bool dsa_port_is_dsa(struct dsa_port *port)
+{
+       return port->type == DSA_PORT_TYPE_DSA;
+}
+
+static inline bool dsa_port_is_cpu(struct dsa_port *port)
+{
+       return port->type == DSA_PORT_TYPE_CPU;
+}
+
+static inline bool dsa_port_is_user(struct dsa_port *dp)
+{
+       return dp->type == DSA_PORT_TYPE_USER;
+}
+
 static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
 {
        return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
index fd84adc..caddf4a 100644 (file)
@@ -57,5 +57,6 @@ int icmp_rcv(struct sk_buff *skb);
 int icmp_err(struct sk_buff *skb, u32 info);
 int icmp_init(void);
 void icmp_out_count(struct net *net, unsigned char type);
+bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr);
 
 #endif /* _ICMP_H */
index 445b66c..d8a1d09 100644 (file)
@@ -7,7 +7,7 @@
  * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2020 Intel Corporation
+ * Copyright (C) 2018 - 2021 Intel Corporation
  */
 
 #ifndef MAC80211_H
@@ -526,6 +526,7 @@ struct ieee80211_fils_discovery {
  * @twt_responder: does this BSS support TWT requester (relevant for managed
  *     mode only, set if the AP advertises TWT responder role)
  * @twt_protected: does this BSS support protected TWT frames
+ * @twt_broadcast: does this BSS support broadcast TWT
  * @assoc: association status
  * @ibss_joined: indicates whether this station is part of an IBSS
  *     or not
@@ -642,6 +643,7 @@ struct ieee80211_bss_conf {
        bool twt_requester;
        bool twt_responder;
        bool twt_protected;
+       bool twt_broadcast;
        /* association related data */
        bool assoc, ibss_joined;
        bool ibss_creator;
@@ -3345,6 +3347,21 @@ enum ieee80211_reconfig_type {
 };
 
 /**
+ * struct ieee80211_prep_tx_info - prepare TX information
+ * @duration: if non-zero, hint about the required duration,
+ *     only used with the mgd_prepare_tx() method.
+ * @subtype: frame subtype (auth, (re)assoc, deauth, disassoc)
+ * @success: whether the frame exchange was successful, only
+ *     used with the mgd_complete_tx() method, and then only
+ *     valid for auth and (re)assoc.
+ */
+struct ieee80211_prep_tx_info {
+       u16 duration;
+       u16 subtype;
+       u8 success:1;
+};
+
+/**
  * struct ieee80211_ops - callbacks from mac80211 to the driver
  *
  * This structure contains various callbacks that the driver may
@@ -3756,9 +3773,13 @@ enum ieee80211_reconfig_type {
  *     frame in case that no beacon was heard from the AP/P2P GO.
  *     The callback will be called before each transmission and upon return
  *     mac80211 will transmit the frame right away.
- *      If duration is greater than zero, mac80211 hints to the driver the
- *      duration for which the operation is requested.
+ *     Additional information is passed in the &struct ieee80211_prep_tx_info
+ *     data. If duration there is greater than zero, mac80211 hints to the
+ *     driver the duration for which the operation is requested.
  *     The callback is optional and can (should!) sleep.
+ * @mgd_complete_tx: Notify the driver that the response frame for a previously
+ *     transmitted frame announced with @mgd_prepare_tx was received; the data
+ *     is filled similarly to @mgd_prepare_tx, though the duration is not used.
  *
  * @mgd_protect_tdls_discover: Protect a TDLS discovery session. After sending
  *     a TDLS discovery-request, we expect a reply to arrive on the AP's
@@ -4109,7 +4130,10 @@ struct ieee80211_ops {
 
        void    (*mgd_prepare_tx)(struct ieee80211_hw *hw,
                                  struct ieee80211_vif *vif,
-                                 u16 duration);
+                                 struct ieee80211_prep_tx_info *info);
+       void    (*mgd_complete_tx)(struct ieee80211_hw *hw,
+                                  struct ieee80211_vif *vif,
+                                  struct ieee80211_prep_tx_info *info);
 
        void    (*mgd_protect_tdls_discover)(struct ieee80211_hw *hw,
                                             struct ieee80211_vif *vif);
@@ -5537,7 +5561,7 @@ void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
  *
  * This function iterates over the interfaces associated with a given
  * hardware that are currently active and calls the callback for them.
- * This version can only be used while holding the RTNL.
+ * This version can only be used while holding the wiphy mutex.
  *
  * @hw: the hardware struct of which the interfaces should be iterated over
  * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
@@ -6184,6 +6208,11 @@ enum rate_control_capabilities {
         * otherwise the NSS difference doesn't bother us.
         */
        RATE_CTRL_CAPA_VHT_EXT_NSS_BW = BIT(0),
+       /**
+        * @RATE_CTRL_CAPA_AMPDU_TRIGGER:
+        * mac80211 should start A-MPDU sessions on tx
+        */
+       RATE_CTRL_CAPA_AMPDU_TRIGGER = BIT(1),
 };
 
 struct rate_control_ops {
@@ -6392,7 +6421,12 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
 
 /**
  * ieee80211_parse_tx_radiotap - Sanity-check and parse the radiotap header
- *                              of injected frames
+ *                              of injected frames.
+ *
+ * To accurately parse and take into account rate and retransmission fields,
+ * you must initialize the chandef field in the ieee80211_tx_info structure
+ * of the skb before calling this function.
+ *
  * @skb: packet injected by userspace
  * @dev: the &struct device of this 802.11 device
  */
@@ -6571,9 +6605,6 @@ static inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
 {
 }
 
-void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
-                             struct ieee80211_txq *txq, bool force);
-
 /**
  * ieee80211_schedule_txq - schedule a TXQ for transmission
  *
@@ -6586,11 +6617,7 @@ void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
  * The driver may call this function if it has buffered packets for
  * this TXQ internally.
  */
-static inline void
-ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
-{
-       __ieee80211_schedule_txq(hw, txq, true);
-}
+void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
 
 /**
  * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
@@ -6602,12 +6629,8 @@ ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
  * The driver may set force=true if it has buffered packets for this TXQ
  * internally.
  */
-static inline void
-ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
-                    bool force)
-{
-       __ieee80211_schedule_txq(hw, txq, force);
-}
+void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
+                         bool force);
 
 /**
  * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit
@@ -6747,4 +6770,22 @@ struct sk_buff *ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw,
 struct sk_buff *
 ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
                                          struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_is_tx_data - check if frame is a data frame
+ *
+ * Checks whether a frame is a data frame. Frames with hardware
+ * encapsulation enabled are always treated as data frames.
+ *
+ * @skb: the frame to be transmitted.
+ */
+static inline bool ieee80211_is_tx_data(struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_hdr *hdr = (void *) skb->data;
+
+       return info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP ||
+              ieee80211_is_data(hdr->frame_control);
+}
+
 #endif /* MAC80211_H */
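
For drivers, the move from a bare duration argument to &struct
ieee80211_prep_tx_info looks roughly like the sketch below (the my_* helpers
are hypothetical):

    static void my_mgd_prepare_tx(struct ieee80211_hw *hw,
                                  struct ieee80211_vif *vif,
                                  struct ieee80211_prep_tx_info *info)
    {
            if (info->duration)
                    my_arm_session_protection(hw, vif, info->duration);
    }

    static void my_mgd_complete_tx(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif,
                                   struct ieee80211_prep_tx_info *info)
    {
            /* info->success is only valid for auth and (re)assoc subtypes */
            my_end_session_protection(hw, vif, info->success);
    }
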
index 83f2377..cb580b0 100644 (file)
@@ -23,6 +23,7 @@ struct mptcp_ext {
        u64             data_seq;
        u32             subflow_seq;
        u16             data_len;
+       __sum16         csum;
        u8              use_map:1,
                        dsn64:1,
                        data_fin:1,
@@ -31,7 +32,8 @@ struct mptcp_ext {
                        mpc_map:1,
                        frozen:1,
                        reset_transient:1;
-       u8              reset_reason:4;
+       u8              reset_reason:4,
+                       csum_reqd:1;
 };
 
 #define MPTCP_RM_IDS_MAX       8
@@ -63,8 +65,10 @@ struct mptcp_out_options {
        struct mptcp_rm_list rm_list;
        u8 join_id;
        u8 backup;
-       u8 reset_reason:4;
-       u8 reset_transient:1;
+       u8 reset_reason:4,
+          reset_transient:1,
+          csum_reqd:1,
+          allow_join_id0:1;
        u32 nonce;
        u64 thmac;
        u32 token;
index befc5b9..12cf6d7 100644 (file)
@@ -188,6 +188,9 @@ struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
 void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
 
 void net_ns_barrier(void);
+
+struct ns_common *get_net_ns(struct ns_common *ns);
+struct net *get_net_ns_by_fd(int fd);
 #else /* CONFIG_NET_NS */
 #include <linux/sched.h>
 #include <linux/nsproxy.h>
@@ -207,13 +210,22 @@ static inline void net_ns_get_ownership(const struct net *net,
 }
 
 static inline void net_ns_barrier(void) {}
+
+static inline struct ns_common *get_net_ns(struct ns_common *ns)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline struct net *get_net_ns_by_fd(int fd)
+{
+       return ERR_PTR(-EINVAL);
+}
 #endif /* CONFIG_NET_NS */
 
 
 extern struct list_head net_namespace_list;
 
 struct net *get_net_ns_by_pid(pid_t pid);
-struct net *get_net_ns_by_fd(int fd);
 
 #ifdef CONFIG_SYSCTL
 void ipx_register_sysctl(void);
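
With the stubs above returning ERR_PTR(-EINVAL) on !CONFIG_NET_NS kernels,
callers are expected to use the usual IS_ERR pattern; a minimal sketch:

    struct net *net = get_net_ns_by_fd(fd);

    if (IS_ERR(net))
            return PTR_ERR(net);    /* -EINVAL when CONFIG_NET_NS is off */
    /* ... use the namespace ... */
    put_net(net);
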
index 96f9cf8..1f47bef 100644 (file)
@@ -159,22 +159,26 @@ unsigned int nf_ct_port_nlattr_tuple_size(void);
 extern const struct nla_policy nf_ct_port_nla_policy[];
 
 #ifdef CONFIG_SYSCTL
-__printf(3, 4) __cold
+__printf(4, 5) __cold
 void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
                               const struct nf_conn *ct,
+                              const struct nf_hook_state *state,
                               const char *fmt, ...);
-__printf(5, 6) __cold
+__printf(4, 5) __cold
 void nf_l4proto_log_invalid(const struct sk_buff *skb,
-                           struct net *net,
-                           u16 pf, u8 protonum,
+                           const struct nf_hook_state *state,
+                           u8 protonum,
                            const char *fmt, ...);
 #else
-static inline __printf(5, 6) __cold
-void nf_l4proto_log_invalid(const struct sk_buff *skb, struct net *net,
-                           u16 pf, u8 protonum, const char *fmt, ...) {}
-static inline __printf(3, 4) __cold
+static inline __printf(4, 5) __cold
+void nf_l4proto_log_invalid(const struct sk_buff *skb,
+                           const struct nf_hook_state *state,
+                           u8 protonum,
+                           const char *fmt, ...) {}
+static inline __printf(4, 5) __cold
 void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
                               const struct nf_conn *ct,
+                              const struct nf_hook_state *state,
                               const char *fmt, ...) { }
 #endif /* CONFIG_SYSCTL */
 
index 46c8d5b..0fa5a6d 100644 (file)
@@ -16,6 +16,7 @@ extern struct nft_expr_type nft_range_type;
 extern struct nft_expr_type nft_meta_type;
 extern struct nft_expr_type nft_rt_type;
 extern struct nft_expr_type nft_exthdr_type;
+extern struct nft_expr_type nft_last_type;
 
 #ifdef CONFIG_NETWORK_SECMARK
 extern struct nft_object_type nft_secmark_obj_type;
index a0f315e..4024072 100644 (file)
@@ -84,6 +84,9 @@ struct netns_sctp {
        /* HB.interval              - 30 seconds  */
        unsigned int hb_interval;
 
+       /* The interval for PLPMTUD probe timer */
+       unsigned int probe_interval;
+
        /* Association.Max.Retrans  - 10 attempts
         * Path.Max.Retrans         - 5  attempts (per destination address)
         * Max.Init.Retransmits     - 8  attempts
index e816b6a..e946366 100644 (file)
@@ -42,6 +42,7 @@ struct netns_xfrm {
        struct hlist_head       __rcu *state_bydst;
        struct hlist_head       __rcu *state_bysrc;
        struct hlist_head       __rcu *state_byspi;
+       struct hlist_head       __rcu *state_byseq;
        unsigned int            state_hmask;
        unsigned int            state_num;
        struct work_struct      state_hash_work;
index 1e62551..c99ffe9 100644 (file)
@@ -37,8 +37,15 @@ enum qdisc_state_t {
        __QDISC_STATE_SCHED,
        __QDISC_STATE_DEACTIVATED,
        __QDISC_STATE_MISSED,
+       __QDISC_STATE_DRAINING,
 };
 
+#define QDISC_STATE_MISSED     BIT(__QDISC_STATE_MISSED)
+#define QDISC_STATE_DRAINING   BIT(__QDISC_STATE_DRAINING)
+
+#define QDISC_STATE_NON_EMPTY  (QDISC_STATE_MISSED | \
+                                       QDISC_STATE_DRAINING)
+
 struct qdisc_size_table {
        struct rcu_head         rcu;
        struct list_head        list;
@@ -110,8 +117,6 @@ struct Qdisc {
        spinlock_t              busylock ____cacheline_aligned_in_smp;
        spinlock_t              seqlock;
 
-       /* for NOLOCK qdisc, true if there are no enqueued skbs */
-       bool                    empty;
        struct rcu_head         rcu;
 
        /* private data */
@@ -145,6 +150,11 @@ static inline bool qdisc_is_running(struct Qdisc *qdisc)
        return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
 }
 
+static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
+{
+       return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY);
+}
+
 static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
 {
        return q->flags & TCQ_F_CPUSTATS;
@@ -153,7 +163,7 @@ static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
 static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
 {
        if (qdisc_is_percpu_stats(qdisc))
-               return READ_ONCE(qdisc->empty);
+               return nolock_qdisc_is_empty(qdisc);
        return !READ_ONCE(qdisc->q.qlen);
 }
 
@@ -161,7 +171,7 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 {
        if (qdisc->flags & TCQ_F_NOLOCK) {
                if (spin_trylock(&qdisc->seqlock))
-                       goto nolock_empty;
+                       return true;
 
                /* If the MISSED flag is set, it means other thread has
                 * set the MISSED flag before second spin_trylock(), so
@@ -183,11 +193,7 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
                /* Retry again in case other CPU may not see the new flag
                 * after it releases the lock at the end of qdisc_run_end().
                 */
-               if (!spin_trylock(&qdisc->seqlock))
-                       return false;
-
-nolock_empty:
-               WRITE_ONCE(qdisc->empty, false);
+               return spin_trylock(&qdisc->seqlock);
        } else if (qdisc_is_running(qdisc)) {
                return false;
        }
@@ -201,15 +207,14 @@ nolock_empty:
 
 static inline void qdisc_run_end(struct Qdisc *qdisc)
 {
-       write_seqcount_end(&qdisc->running);
        if (qdisc->flags & TCQ_F_NOLOCK) {
                spin_unlock(&qdisc->seqlock);
 
                if (unlikely(test_bit(__QDISC_STATE_MISSED,
-                                     &qdisc->state))) {
-                       clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+                                     &qdisc->state)))
                        __netif_schedule(qdisc);
-               }
+       } else {
+               write_seqcount_end(&qdisc->running);
        }
 }
 
index 5e84888..2058fab 100644 (file)
@@ -59,6 +59,7 @@ enum sctp_verb {
        SCTP_CMD_HB_TIMERS_START,    /* Start the heartbeat timers. */
        SCTP_CMD_HB_TIMER_UPDATE,    /* Update a heartbeat timers.  */
        SCTP_CMD_HB_TIMERS_STOP,     /* Stop the heartbeat timers.  */
+       SCTP_CMD_PROBE_TIMER_UPDATE, /* Update a probe timer.  */
        SCTP_CMD_TRANSPORT_HB_SENT,  /* Reset the status of a transport. */
        SCTP_CMD_TRANSPORT_IDLE,     /* Do manipulations on idle transport */
        SCTP_CMD_TRANSPORT_ON,       /* Mark the transport as active. */
index 14a0d22..265fffa 100644 (file)
@@ -77,6 +77,7 @@ enum sctp_event_timeout {
        SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD,
        SCTP_EVENT_TIMEOUT_HEARTBEAT,
        SCTP_EVENT_TIMEOUT_RECONF,
+       SCTP_EVENT_TIMEOUT_PROBE,
        SCTP_EVENT_TIMEOUT_SACK,
        SCTP_EVENT_TIMEOUT_AUTOCLOSE,
 };
@@ -200,6 +201,23 @@ enum sctp_sock_state {
        SCTP_SS_CLOSING        = TCP_CLOSE_WAIT,
 };
 
+enum sctp_plpmtud_state {
+       SCTP_PL_DISABLED,
+       SCTP_PL_BASE,
+       SCTP_PL_SEARCH,
+       SCTP_PL_COMPLETE,
+       SCTP_PL_ERROR,
+};
+
+#define        SCTP_BASE_PLPMTU        1200
+#define        SCTP_MAX_PLPMTU         9000
+#define        SCTP_MIN_PLPMTU         512
+
+#define        SCTP_MAX_PROBES         3
+
+#define SCTP_PL_BIG_STEP       32
+#define SCTP_PL_MIN_STEP       4
+
 /* These functions map various type to printable names.  */
 const char *sctp_cname(const union sctp_subtype id);   /* chunk types */
 const char *sctp_oname(const union sctp_subtype id);   /* other events */
@@ -424,4 +442,6 @@ enum {
  */
 #define SCTP_AUTH_RANDOM_LENGTH 32
 
+#define SCTP_PROBE_TIMER_MIN   5000
+
 #endif /* __sctp_constants_h__ */
index 86f74f2..69bab88 100644 (file)
@@ -145,6 +145,8 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
                             struct sctphdr *, struct sctp_association **,
                             struct sctp_transport **);
 void sctp_err_finish(struct sock *, struct sctp_transport *);
+int sctp_udp_v4_err(struct sock *sk, struct sk_buff *skb);
+int sctp_udp_v6_err(struct sock *sk, struct sk_buff *skb);
 void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
                           struct sctp_transport *t, __u32 pmtu);
 void sctp_icmp_redirect(struct sock *, struct sctp_transport *,
@@ -573,14 +575,15 @@ static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *
 /* Calculate max payload size given a MTU, or the total overhead if
  * given MTU is zero
  */
-static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp,
-                                    __u32 mtu, __u32 extra)
+static inline __u32 __sctp_mtu_payload(const struct sctp_sock *sp,
+                                      const struct sctp_transport *t,
+                                      __u32 mtu, __u32 extra)
 {
        __u32 overhead = sizeof(struct sctphdr) + extra;
 
        if (sp) {
                overhead += sp->pf->af->net_header_len;
-               if (sp->udp_port)
+               if (sp->udp_port && (!t || t->encap_port))
                        overhead += sizeof(struct udphdr);
        } else {
                overhead += sizeof(struct ipv6hdr);
@@ -592,6 +595,12 @@ static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp,
        return mtu ? mtu - overhead : overhead;
 }
 
+static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp,
+                                    __u32 mtu, __u32 extra)
+{
+       return __sctp_mtu_payload(sp, NULL, mtu, extra);
+}
+
 static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
 {
        return SCTP_TRUNC4(max_t(__u32, dst_mtu(dst),
@@ -615,6 +624,48 @@ static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize)
        return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize);
 }
 
+static inline int sctp_transport_pl_hlen(struct sctp_transport *t)
+{
+       return __sctp_mtu_payload(sctp_sk(t->asoc->base.sk), t, 0, 0);
+}
+
+static inline void sctp_transport_pl_reset(struct sctp_transport *t)
+{
+       if (t->probe_interval && (t->param_flags & SPP_PMTUD_ENABLE) &&
+           (t->state == SCTP_ACTIVE || t->state == SCTP_UNKNOWN)) {
+               if (t->pl.state == SCTP_PL_DISABLED) {
+                       t->pl.state = SCTP_PL_BASE;
+                       t->pl.pmtu = SCTP_BASE_PLPMTU;
+                       t->pl.probe_size = SCTP_BASE_PLPMTU;
+                       sctp_transport_reset_probe_timer(t);
+               }
+       } else {
+               if (t->pl.state != SCTP_PL_DISABLED) {
+                       if (del_timer(&t->probe_timer))
+                               sctp_transport_put(t);
+                       t->pl.state = SCTP_PL_DISABLED;
+               }
+       }
+}
+
+static inline void sctp_transport_pl_update(struct sctp_transport *t)
+{
+       if (t->pl.state == SCTP_PL_DISABLED)
+               return;
+
+       if (del_timer(&t->probe_timer))
+               sctp_transport_put(t);
+
+       t->pl.state = SCTP_PL_BASE;
+       t->pl.pmtu = SCTP_BASE_PLPMTU;
+       t->pl.probe_size = SCTP_BASE_PLPMTU;
+}
+
+static inline bool sctp_transport_pl_enabled(struct sctp_transport *t)
+{
+       return t->pl.state != SCTP_PL_DISABLED;
+}
+
 static inline bool sctp_newsk_ready(const struct sock *sk)
 {
        return sock_flag(sk, SOCK_DEAD) || sk->sk_socket;
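
Tying the PLPMTUD helpers together: probing is armed per transport once a
nonzero probe interval is set while PMTU discovery is enabled, at which point
sctp_transport_pl_reset() moves the state machine from DISABLED to BASE. A
sketch of the preconditions visible above (the interval value is illustrative):

    /* t must be SCTP_ACTIVE or SCTP_UNKNOWN with SPP_PMTUD_ENABLE set */
    t->probe_interval = probe_interval;     /* nonzero enables probing */
    sctp_transport_pl_reset(t);             /* DISABLED -> BASE, pmtu = SCTP_BASE_PLPMTU */

    if (sctp_transport_pl_enabled(t))
            pr_debug("PLPMTUD probe size %u\n", t->pl.probe_size);
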
index fd223c9..2eb6d7c 100644 (file)
@@ -151,6 +151,7 @@ sctp_state_fn_t sctp_sf_cookie_wait_icmp_abort;
 /* Prototypes for timeout event state functions.  */
 sctp_state_fn_t sctp_sf_do_6_3_3_rtx;
 sctp_state_fn_t sctp_sf_send_reconf;
+sctp_state_fn_t sctp_sf_send_probe;
 sctp_state_fn_t sctp_sf_do_6_2_sack;
 sctp_state_fn_t sctp_sf_autoclose_timer_expire;
 
@@ -225,11 +226,13 @@ struct sctp_chunk *sctp_make_new_encap_port(
                                        const struct sctp_association *asoc,
                                        const struct sctp_chunk *chunk);
 struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
-                                      const struct sctp_transport *transport);
+                                      const struct sctp_transport *transport,
+                                      __u32 probe_size);
 struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc,
                                           const struct sctp_chunk *chunk,
                                           const void *payload,
                                           const size_t paylen);
+struct sctp_chunk *sctp_make_pad(const struct sctp_association *asoc, int len);
 struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
                                      const struct sctp_chunk *chunk,
                                      __be16 cause_code, const void *payload,
@@ -310,6 +313,7 @@ int sctp_do_sm(struct net *net, enum sctp_event_type event_type,
 void sctp_generate_t3_rtx_event(struct timer_list *t);
 void sctp_generate_heartbeat_event(struct timer_list *t);
 void sctp_generate_reconf_event(struct timer_list *t);
+void sctp_generate_probe_event(struct timer_list *t);
 void sctp_generate_proto_unreach_event(struct timer_list *t);
 
 void sctp_ootb_pkt_free(struct sctp_packet *packet);
index 1aa5852..c4a4c17 100644 (file)
@@ -177,6 +177,7 @@ struct sctp_sock {
         * will be inherited by all new associations.
         */
        __u32 hbinterval;
+       __u32 probe_interval;
 
        __be16 udp_port;
        __be16 encap_port;
@@ -385,6 +386,7 @@ struct sctp_sender_hb_info {
        union sctp_addr daddr;
        unsigned long sent_at;
        __u64 hb_nonce;
+       __u32 probe_size;
 };
 
 int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
@@ -656,6 +658,7 @@ struct sctp_chunk {
                data_accepted:1,        /* At least 1 chunk accepted */
                auth:1,                 /* IN: was auth'ed | OUT: needs auth */
                has_asconf:1,           /* IN: have seen an asconf before */
+               pmtu_probe:1,           /* Used by PLPMTUD, can be set in a HB chunk */
                tsn_missing_report:2,   /* Data chunk missing counter. */
                fast_retransmit:2;      /* Is this chunk fast retransmitted? */
 };
@@ -858,6 +861,7 @@ struct sctp_transport {
         * the destination address every heartbeat interval.
         */
        unsigned long hbinterval;
+       unsigned long probe_interval;
 
        /* SACK delay timeout */
        unsigned long sackdelay;
@@ -934,6 +938,9 @@ struct sctp_transport {
        /* Timer to handler reconf chunk rtx */
        struct timer_list reconf_timer;
 
+       /* Timer to send a probe HB packet for PLPMTUD */
+       struct timer_list probe_timer;
+
        /* Since we're using per-destination retransmission timers
         * (see above), we're also using per-destination "transmitted"
         * queues.  This probably ought to be a private struct
@@ -976,6 +983,15 @@ struct sctp_transport {
                char cacc_saw_newack;
        } cacc;
 
+       struct {
+               __u16 pmtu;
+               __u16 probe_size;
+               __u16 probe_high;
+               __u8 probe_count:3;
+               __u8 raise_count:5;
+               __u8 state;
+       } pl; /* plpmtud related */
+
        /* 64-bit random number sent with heartbeat. */
        __u64 hb_nonce;
 
@@ -993,6 +1009,7 @@ void sctp_transport_free(struct sctp_transport *);
 void sctp_transport_reset_t3_rtx(struct sctp_transport *);
 void sctp_transport_reset_hb_timer(struct sctp_transport *);
 void sctp_transport_reset_reconf_timer(struct sctp_transport *transport);
+void sctp_transport_reset_probe_timer(struct sctp_transport *transport);
 int sctp_transport_hold(struct sctp_transport *);
 void sctp_transport_put(struct sctp_transport *);
 void sctp_transport_update_rto(struct sctp_transport *, __u32);
@@ -1007,6 +1024,8 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
 void sctp_transport_immediate_rtx(struct sctp_transport *);
 void sctp_transport_dst_release(struct sctp_transport *t);
 void sctp_transport_dst_confirm(struct sctp_transport *t);
+void sctp_transport_pl_send(struct sctp_transport *t);
+void sctp_transport_pl_recv(struct sctp_transport *t);
 
 
 /* This is the structure we use to queue packets as they come into
@@ -1795,6 +1814,7 @@ struct sctp_association {
         * will be inherited by all new transports.
         */
        unsigned long hbinterval;
+       unsigned long probe_interval;
 
        __be16 encap_port;
 
index 9b341c2..ced2fc9 100644 (file)
@@ -1934,7 +1934,8 @@ static inline u32 net_tx_rndhash(void)
 
 static inline void sk_set_txhash(struct sock *sk)
 {
-       sk->sk_txhash = net_tx_rndhash();
+       /* This pairs with READ_ONCE() in skb_set_hash_from_sk() */
+       WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
 }
 
 static inline bool sk_rethink_txhash(struct sock *sk)
@@ -2206,9 +2207,12 @@ static inline void sock_poll_wait(struct file *filp, struct socket *sock,
 
 static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
 {
-       if (sk->sk_txhash) {
+       /* This pairs with WRITE_ONCE() in sk_set_txhash() */
+       u32 txhash = READ_ONCE(sk->sk_txhash);
+
+       if (txhash) {
                skb->l4_hash = 1;
-               skb->hash = sk->sk_txhash;
+               skb->hash = txhash;
        }
 }
 
@@ -2266,8 +2270,13 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
 static inline int sock_error(struct sock *sk)
 {
        int err;
-       if (likely(!sk->sk_err))
+
+       /* Avoid an atomic operation for the common case.
+        * This is racy since another cpu/thread can change sk_err under us.
+        */
+       if (likely(data_race(!sk->sk_err)))
                return 0;
+
        err = xchg(&sk->sk_err, 0);
        return -err;
 }
index f1a5a9a..e4cac92 100644 (file)
@@ -202,6 +202,7 @@ enum switchdev_notifier_type {
 struct switchdev_notifier_info {
        struct net_device *dev;
        struct netlink_ext_ack *extack;
+       const void *ctx;
 };
 
 struct switchdev_notifier_fdb_info {
@@ -268,19 +269,19 @@ void switchdev_port_fwd_mark_set(struct net_device *dev,
 int switchdev_handle_port_obj_add(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
-                       int (*add_cb)(struct net_device *dev,
+                       int (*add_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_obj *obj,
                                      struct netlink_ext_ack *extack));
 int switchdev_handle_port_obj_del(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
-                       int (*del_cb)(struct net_device *dev,
+                       int (*del_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_obj *obj));
 
 int switchdev_handle_port_attr_set(struct net_device *dev,
                        struct switchdev_notifier_port_attr_info *port_attr_info,
                        bool (*check_cb)(const struct net_device *dev),
-                       int (*set_cb)(struct net_device *dev,
+                       int (*set_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_attr *attr,
                                      struct netlink_ext_ack *extack));
 #else
@@ -352,7 +353,7 @@ static inline int
 switchdev_handle_port_obj_add(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
-                       int (*add_cb)(struct net_device *dev,
+                       int (*add_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_obj *obj,
                                      struct netlink_ext_ack *extack))
 {
@@ -363,7 +364,7 @@ static inline int
 switchdev_handle_port_obj_del(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
-                       int (*del_cb)(struct net_device *dev,
+                       int (*del_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_obj *obj))
 {
        return 0;
@@ -373,7 +374,7 @@ static inline int
 switchdev_handle_port_attr_set(struct net_device *dev,
                        struct switchdev_notifier_port_attr_info *port_attr_info,
                        bool (*check_cb)(const struct net_device *dev),
-                       int (*set_cb)(struct net_device *dev,
+                       int (*set_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_attr *attr,
                                      struct netlink_ext_ack *extack))
 {
index 8341a8d..8d398a5 100644 (file)
@@ -79,8 +79,6 @@
        __SNMP_INC_STATS((net)->mib.tls_statistics, field)
 #define TLS_INC_STATS(net, field)                              \
        SNMP_INC_STATS((net)->mib.tls_statistics, field)
-#define __TLS_DEC_STATS(net, field)                            \
-       __SNMP_DEC_STATS((net)->mib.tls_statistics, field)
 #define TLS_DEC_STATS(net, field)                              \
        SNMP_DEC_STATS((net)->mib.tls_statistics, field)
 
index c58a6d4..b7e65ae 100644 (file)
@@ -145,6 +145,12 @@ enum {
        XFRM_MODE_FLAG_TUNNEL = 1,
 };
 
+enum xfrm_replay_mode {
+       XFRM_REPLAY_MODE_LEGACY,
+       XFRM_REPLAY_MODE_BMP,
+       XFRM_REPLAY_MODE_ESN,
+};
+
 /* Full description of state of transformer. */
 struct xfrm_state {
        possible_net_t          xs_net;
@@ -154,6 +160,7 @@ struct xfrm_state {
        };
        struct hlist_node       bysrc;
        struct hlist_node       byspi;
+       struct hlist_node       byseq;
 
        refcount_t              refcnt;
        spinlock_t              lock;
@@ -214,9 +221,8 @@ struct xfrm_state {
        struct xfrm_replay_state preplay;
        struct xfrm_replay_state_esn *preplay_esn;
 
-       /* The functions for replay detection. */
-       const struct xfrm_replay *repl;
-
+       /* replay detection mode */
+       enum xfrm_replay_mode    repl_mode;
        /* internal flag that only holds state for delayed aevent at the
         * moment
        */
@@ -296,18 +302,6 @@ struct km_event {
        struct net *net;
 };
 
-struct xfrm_replay {
-       void    (*advance)(struct xfrm_state *x, __be32 net_seq);
-       int     (*check)(struct xfrm_state *x,
-                        struct sk_buff *skb,
-                        __be32 net_seq);
-       int     (*recheck)(struct xfrm_state *x,
-                          struct sk_buff *skb,
-                          __be32 net_seq);
-       void    (*notify)(struct xfrm_state *x, int event);
-       int     (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
-};
-
 struct xfrm_if_cb {
        struct xfrm_if  *(*decode_session)(struct sk_buff *skb,
                                           unsigned short family);
@@ -387,7 +381,6 @@ void xfrm_flush_gc(void);
 void xfrm_state_delete_tunnel(struct xfrm_state *x);
 
 struct xfrm_type {
-       char                    *description;
        struct module           *owner;
        u8                      proto;
        u8                      flags;
@@ -402,14 +395,12 @@ struct xfrm_type {
        int                     (*output)(struct xfrm_state *, struct sk_buff *pskb);
        int                     (*reject)(struct xfrm_state *, struct sk_buff *,
                                          const struct flowi *);
-       int                     (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
 };
 
 int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
 void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
 
 struct xfrm_type_offload {
-       char            *description;
        struct module   *owner;
        u8              proto;
        void            (*encap)(struct xfrm_state *, struct sk_buff *pskb);
@@ -1024,6 +1015,7 @@ struct xfrm_offload {
 #define CRYPTO_INVALID_PROTOCOL                        128
 
        __u8                    proto;
+       __u8                    inner_ipproto;
 };
 
 struct sec_path {
@@ -1581,7 +1573,6 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
 }
 
 int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
-int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb);
 int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
 int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
 int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
@@ -1605,9 +1596,6 @@ int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family)
 __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
 __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
 int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
-int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb);
-int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
-                         u8 **prevhdr);
 
 #ifdef CONFIG_XFRM
 void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu);
@@ -1721,6 +1709,12 @@ static inline int xfrm_policy_id2dir(u32 index)
 }
 
 #ifdef CONFIG_XFRM
+void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq);
+int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
+void xfrm_replay_notify(struct xfrm_state *x, int event);
+int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm_replay_recheck(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
+
 static inline int xfrm_aevent_is_on(struct net *net)
 {
        struct sock *nlsk;
index 1358a0c..0bc29c4 100644 (file)
@@ -81,7 +81,7 @@ struct snd_compr_stream;
 #define SND_SOC_DAIFMT_CBP_CFP         (1 << 12) /* codec clk provider & frame provider */
 #define SND_SOC_DAIFMT_CBC_CFP         (2 << 12) /* codec clk consumer & frame provider */
 #define SND_SOC_DAIFMT_CBP_CFC         (3 << 12) /* codec clk provider & frame consumer */
-#define SND_SOC_DAIFMT_CBC_CFC         (4 << 12) /* codec clk consumer & frame follower */
+#define SND_SOC_DAIFMT_CBC_CFC         (4 << 12) /* codec clk consumer & frame consumer */
 
 /* previous definitions kept for backwards-compatibility, do not use in new contributions */
 #define SND_SOC_DAIFMT_CBM_CFM         SND_SOC_DAIFMT_CBP_CFP
index 775a46d..6bf4317 100644 (file)
@@ -73,6 +73,7 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext,
                __field(u64, data_seq)
                __field(u32, subflow_seq)
                __field(u16, data_len)
+               __field(u16, csum)
                __field(u8, use_map)
                __field(u8, dsn64)
                __field(u8, data_fin)
@@ -82,6 +83,7 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext,
                __field(u8, frozen)
                __field(u8, reset_transient)
                __field(u8, reset_reason)
+               __field(u8, csum_reqd)
        ),
 
        TP_fast_assign(
@@ -89,6 +91,7 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext,
                __entry->data_seq = mpext->data_seq;
                __entry->subflow_seq = mpext->subflow_seq;
                __entry->data_len = mpext->data_len;
+               __entry->csum = (__force u16)mpext->csum;
                __entry->use_map = mpext->use_map;
                __entry->dsn64 = mpext->dsn64;
                __entry->data_fin = mpext->data_fin;
@@ -98,16 +101,18 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext,
                __entry->frozen = mpext->frozen;
                __entry->reset_transient = mpext->reset_transient;
                __entry->reset_reason = mpext->reset_reason;
+               __entry->csum_reqd = mpext->csum_reqd;
        ),
 
-       TP_printk("data_ack=%llu data_seq=%llu subflow_seq=%u data_len=%u use_map=%u dsn64=%u data_fin=%u use_ack=%u ack64=%u mpc_map=%u frozen=%u reset_transient=%u reset_reason=%u",
+       TP_printk("data_ack=%llu data_seq=%llu subflow_seq=%u data_len=%u csum=%x use_map=%u dsn64=%u data_fin=%u use_ack=%u ack64=%u mpc_map=%u frozen=%u reset_transient=%u reset_reason=%u csum_reqd=%u",
                  __entry->data_ack, __entry->data_seq,
                  __entry->subflow_seq, __entry->data_len,
-                 __entry->use_map, __entry->dsn64,
-                 __entry->data_fin, __entry->use_ack,
-                 __entry->ack64, __entry->mpc_map,
-                 __entry->frozen, __entry->reset_transient,
-                 __entry->reset_reason)
+                 __entry->csum, __entry->use_map,
+                 __entry->dsn64, __entry->data_fin,
+                 __entry->use_ack, __entry->ack64,
+                 __entry->mpc_map, __entry->frozen,
+                 __entry->reset_transient, __entry->reset_reason,
+                 __entry->csum_reqd)
 );
 
 DEFINE_EVENT(mptcp_dump_mpext, get_mapping_status,
index 4dcd13d..d588c24 100644 (file)
 #define SO_PREFER_BUSY_POLL    69
 #define SO_BUSY_POLL_BUDGET    70
 
+#define SO_NETNS_COOKIE                71
+
 #if !defined(__KERNEL__)
 
 #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
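
The new SO_NETNS_COOKIE option lets a process read the cookie of the network namespace its socket lives in. A minimal userspace sketch, assuming a libc that does not yet define the constant (the value 71 comes from the hunk above; per-arch socket.h files may use different numbers):

#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>

#ifndef SO_NETNS_COOKIE
#define SO_NETNS_COOKIE 71	/* from the hunk above; arch headers may differ */
#endif

int main(void)
{
	uint64_t cookie = 0;
	socklen_t len = sizeof(cookie);
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0 || getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE, &cookie, &len))
		return 1;
	printf("netns cookie: %llu\n", (unsigned long long)cookie);
	return 0;
}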
index 6de5a7f..d2a9420 100644 (file)
@@ -863,8 +863,7 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
 __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
 #define __NR_mount_setattr 442
 __SYSCALL(__NR_mount_setattr, sys_mount_setattr)
-#define __NR_quotactl_path 443
-__SYSCALL(__NR_quotactl_path, sys_quotactl_path)
+/* 443 is reserved for quotactl_path */
 
 #define __NR_landlock_create_ruleset 444
 __SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
index 825cfda..c7135c9 100644 (file)
@@ -675,7 +675,7 @@ enum {
        ETHTOOL_A_MODULE_EEPROM_PAGE,                   /* u8 */
        ETHTOOL_A_MODULE_EEPROM_BANK,                   /* u8 */
        ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS,            /* u8 */
-       ETHTOOL_A_MODULE_EEPROM_DATA,                   /* nested */
+       ETHTOOL_A_MODULE_EEPROM_DATA,                   /* binary */
 
        __ETHTOOL_A_MODULE_EEPROM_CNT,
        ETHTOOL_A_MODULE_EEPROM_MAX = (__ETHTOOL_A_MODULE_EEPROM_CNT - 1)
index 7d66876..d1b3270 100644 (file)
@@ -289,6 +289,9 @@ struct sockaddr_in {
 /* Address indicating an error return. */
 #define        INADDR_NONE             ((unsigned long int) 0xffffffff)
 
+/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
+#define        INADDR_DUMMY            ((unsigned long int) 0xc0000008)
+
 /* Network number for local host loopback. */
 #define        IN_LOOPBACKNET          127
 
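INADDR_DUMMY backs the RFC 7600 behaviour of sourcing ICMP replies from 192.0.0.8 when no real address is configured. A one-line sanity check that the constant decodes to that dotted quad:

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	/* 0xc0000008 is INADDR_DUMMY from the hunk above, i.e. 192.0.0.8 */
	struct in_addr a = { .s_addr = htonl(0xc0000008) };

	printf("%s\n", inet_ntoa(a));
	return 0;
}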
index ee93428..225ec87 100644 (file)
 #define KEY_VOICECOMMAND               0x246   /* Listening Voice Command */
 #define KEY_ASSISTANT          0x247   /* AL Context-aware desktop assistant */
 #define KEY_KBD_LAYOUT_NEXT    0x248   /* AC Next Keyboard Layout Select */
+#define KEY_EMOJI_PICKER       0x249   /* Show/hide emoji picker (HUTRR101) */
 
 #define KEY_BRIGHTNESS_MIN             0x250   /* Set Brightness to Minimum */
 #define KEY_BRIGHTNESS_MAX             0x251   /* Set Brightness to Maximum */
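
KEY_EMOJI_PICKER is an ordinary EV_KEY code, so existing evdev consumers pick it up unchanged. A sketch of watching for it, with /dev/input/event0 as a hypothetical stand-in device path:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/input.h>

#ifndef KEY_EMOJI_PICKER
#define KEY_EMOJI_PICKER 0x249	/* from the hunk above */
#endif

int main(void)
{
	struct input_event ev;
	int fd = open("/dev/input/event0", O_RDONLY);	/* hypothetical device */

	if (fd < 0)
		return 1;
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		if (ev.type == EV_KEY && ev.code == KEY_EMOJI_PICKER && ev.value == 1)
			printf("emoji picker pressed\n");
	return 0;
}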
index e1ae466..162ff99 100644 (file)
@@ -280,6 +280,7 @@ struct io_uring_params {
 #define IORING_FEAT_SQPOLL_NONFIXED    (1U << 7)
 #define IORING_FEAT_EXT_ARG            (1U << 8)
 #define IORING_FEAT_NATIVE_WORKERS     (1U << 9)
+#define IORING_FEAT_RSRC_TAGS          (1U << 10)
 
 /*
  * io_uring_register(2) opcodes and arguments
@@ -298,8 +299,12 @@ enum {
        IORING_UNREGISTER_PERSONALITY           = 10,
        IORING_REGISTER_RESTRICTIONS            = 11,
        IORING_REGISTER_ENABLE_RINGS            = 12,
-       IORING_REGISTER_RSRC                    = 13,
-       IORING_REGISTER_RSRC_UPDATE             = 14,
+
+       /* extended with tagging */
+       IORING_REGISTER_FILES2                  = 13,
+       IORING_REGISTER_FILES_UPDATE2           = 14,
+       IORING_REGISTER_BUFFERS2                = 15,
+       IORING_REGISTER_BUFFERS_UPDATE          = 16,
 
        /* this goes last */
        IORING_REGISTER_LAST
@@ -312,14 +317,10 @@ struct io_uring_files_update {
        __aligned_u64 /* __s32 * */ fds;
 };
 
-enum {
-       IORING_RSRC_FILE                = 0,
-       IORING_RSRC_BUFFER              = 1,
-};
-
 struct io_uring_rsrc_register {
-       __u32 type;
        __u32 nr;
+       __u32 resv;
+       __u64 resv2;
        __aligned_u64 data;
        __aligned_u64 tags;
 };
@@ -335,8 +336,8 @@ struct io_uring_rsrc_update2 {
        __u32 resv;
        __aligned_u64 data;
        __aligned_u64 tags;
-       __u32 type;
        __u32 nr;
+       __u32 resv2;
 };
 
 /* Skip updating fd indexes set to this value in the fd table */
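
Since the RSRC opcodes were renumbered, userspace should gate the tag-aware registration calls on the new feature bit rather than probing opcodes blindly. A minimal check via the raw setup syscall, assuming the installed headers are new enough to provide __NR_io_uring_setup and IORING_FEAT_RSRC_TAGS:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

int main(void)
{
	struct io_uring_params p;
	int fd;

	memset(&p, 0, sizeof(p));
	fd = syscall(__NR_io_uring_setup, 4, &p);
	if (fd < 0)
		return 1;
	/* IORING_REGISTER_FILES2 and friends are only safe to use when the
	 * kernel advertises tagging support.
	 */
	printf("rsrc tags %ssupported\n",
	       (p.features & IORING_FEAT_RSRC_TAGS) ? "" : "not ");
	close(fd);
	return 0;
}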
index 3fd9a7e..79d9c44 100644 (file)
@@ -8,6 +8,7 @@
  * Note: you must update KVM_API_VERSION if you change this interface.
  */
 
+#include <linux/const.h>
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <linux/ioctl.h>
@@ -1879,8 +1880,8 @@ struct kvm_hyperv_eventfd {
  * conversion after harvesting an entry.  Also, it must not skip any
  * dirty bits, so that dirty bits are always harvested in sequence.
  */
-#define KVM_DIRTY_GFN_F_DIRTY           BIT(0)
-#define KVM_DIRTY_GFN_F_RESET           BIT(1)
+#define KVM_DIRTY_GFN_F_DIRTY           _BITUL(0)
+#define KVM_DIRTY_GFN_F_RESET           _BITUL(1)
 #define KVM_DIRTY_GFN_F_MASK            0x3
 
 /*
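Switching to _BITUL matters because BIT() is not available to userspace, and these flags are consumed directly from the mmap'ed dirty ring. A hedged sketch of harvesting one entry, assuming a linux/kvm.h with the dirty-ring definitions and 'ring' pointing at the mapped struct kvm_dirty_gfn array (real code must also follow the documented memory-ordering rules):

#include <linux/kvm.h>

static int harvest_one(struct kvm_dirty_gfn *ring, unsigned int i)
{
	struct kvm_dirty_gfn *e = &ring[i];

	if (!(e->flags & KVM_DIRTY_GFN_F_DIRTY))
		return 0;			/* nothing new to collect */
	/* ... record e->slot and e->offset here ... */
	e->flags |= KVM_DIRTY_GFN_F_RESET;	/* hand the entry back to KVM */
	return 1;
}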
index 8eb3c08..7b05f71 100644 (file)
@@ -105,6 +105,7 @@ struct mptcp_info {
        __u64   mptcpi_rcv_nxt;
        __u8    mptcpi_local_addr_used;
        __u8    mptcpi_local_addr_max;
+       __u8    mptcpi_csum_enabled;
 };
 
 /*
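mptcpi_csum_enabled rides along in the existing struct mptcp_info, so it becomes visible through the MPTCP_INFO getsockopt on kernels that support it. A sketch with fallback defines; the fallback values and the availability of the getsockopt are assumptions if the installed headers predate the new field:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mptcp.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif
#ifndef SOL_MPTCP
#define SOL_MPTCP 284		/* assumed value from include/linux/socket.h */
#endif
#ifndef MPTCP_INFO
#define MPTCP_INFO 1		/* assumed getsockopt name for struct mptcp_info */
#endif

int main(void)
{
	struct mptcp_info info = { 0 };
	socklen_t len = sizeof(info);
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

	if (fd < 0 || getsockopt(fd, SOL_MPTCP, MPTCP_INFO, &info, &len))
		return 1;
	printf("checksums enabled: %u\n", info.mptcpi_csum_enabled);
	return 0;
}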
index 19715e2..e94d1fa 100644 (file)
@@ -1196,6 +1196,21 @@ enum nft_counter_attributes {
 #define NFTA_COUNTER_MAX       (__NFTA_COUNTER_MAX - 1)
 
 /**
+ * enum nft_last_attributes - nf_tables last expression netlink attributes
+ *
+ * @NFTA_LAST_SET: last update has been set, zero means never updated (NLA_U32)
+ * @NFTA_LAST_MSECS: milliseconds since last update (NLA_U64)
+ */
+enum nft_last_attributes {
+       NFTA_LAST_UNSPEC,
+       NFTA_LAST_SET,
+       NFTA_LAST_MSECS,
+       NFTA_LAST_PAD,
+       __NFTA_LAST_MAX
+};
+#define NFTA_LAST_MAX  (__NFTA_LAST_MAX - 1)
+
+/**
  * enum nft_log_attributes - nf_tables log expression netlink attributes
  *
  * @NFTA_LOG_GROUP: netlink group to send messages to (NLA_U32)
index f962c06..db47499 100644 (file)
@@ -11,7 +11,7 @@
  * Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
  * Copyright 2008 Colin McCabe <colin@cozybit.com>
  * Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -3654,6 +3654,8 @@ enum nl80211_mpath_info {
  *     defined
  * @NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA: HE 6GHz band capabilities (__le16),
  *     given for all 6 GHz band channels
+ * @NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS: vendor element capabilities that are
+ *     advertised on this band/for this iftype (binary)
  * @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use
  */
 enum nl80211_band_iftype_attr {
@@ -3665,6 +3667,7 @@ enum nl80211_band_iftype_attr {
        NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET,
        NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE,
        NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA,
+       NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS,
 
        /* keep last */
        __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST,
@@ -6912,6 +6915,9 @@ enum nl80211_peer_measurement_ftm_capa {
  * @NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK: negotiate for LMR feedback. Only
  *     valid if either %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED or
  *     %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED is set.
+ * @NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR: optional. The BSS color of the
+ *     responder. Only valid if %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED
+ *     or %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED is set.
  *
  * @NUM_NL80211_PMSR_FTM_REQ_ATTR: internal
  * @NL80211_PMSR_FTM_REQ_ATTR_MAX: highest attribute number
@@ -6931,6 +6937,7 @@ enum nl80211_peer_measurement_ftm_req {
        NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED,
        NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED,
        NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK,
+       NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR,
 
        /* keep last */
        NUM_NL80211_PMSR_FTM_REQ_ATTR,
index cb78e7a..c4ff1eb 100644 (file)
@@ -141,6 +141,7 @@ typedef __s32 sctp_assoc_t;
 #define SCTP_EXPOSE_POTENTIALLY_FAILED_STATE   131
 #define SCTP_EXPOSE_PF_STATE   SCTP_EXPOSE_POTENTIALLY_FAILED_STATE
 #define SCTP_REMOTE_UDP_ENCAPS_PORT    132
+#define SCTP_PLPMTUD_PROBE_INTERVAL    133
 
 /* PR-SCTP policies */
 #define SCTP_PR_SCTP_NONE      0x0000
@@ -1213,4 +1214,11 @@ enum sctp_sched_type {
        SCTP_SS_MAX = SCTP_SS_RR
 };
 
+/* Probe Interval socket option */
+struct sctp_probeinterval {
+       sctp_assoc_t spi_assoc_id;
+       struct sockaddr_storage spi_address;
+       __u32 spi_interval;
+};
+
 #endif /* _UAPI_SCTP_H */
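
SCTP_PLPMTUD_PROBE_INTERVAL takes the struct added above. A sketch of enabling probing on a socket, assuming lksctp userspace headers, that a zeroed spi_address targets the association as a whole, and that the interval is in milliseconds; the definitions are repeated locally in case the installed headers predate them:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <linux/types.h>

#ifndef SCTP_PLPMTUD_PROBE_INTERVAL
#define SCTP_PLPMTUD_PROBE_INTERVAL 133
struct sctp_probeinterval {
	sctp_assoc_t spi_assoc_id;
	struct sockaddr_storage spi_address;
	__u32 spi_interval;
};
#endif

/* sketch: request a 10s probe interval (unit assumed to be ms) */
static int set_probe_interval(int fd)
{
	struct sctp_probeinterval pi;

	memset(&pi, 0, sizeof(pi));
	pi.spi_interval = 10000;
	return setsockopt(fd, IPPROTO_SCTP, SCTP_PLPMTUD_PROBE_INTERVAL,
			  &pi, sizeof(pi));
}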
index 5ae3ace..332b18f 100644 (file)
@@ -64,6 +64,8 @@ enum {
        SEG6_LOCAL_ACTION_END_AM        = 14,
        /* custom BPF action */
        SEG6_LOCAL_ACTION_END_BPF       = 15,
+       /* decap and lookup of DA in v4 or v6 table */
+       SEG6_LOCAL_ACTION_END_DT46      = 16,
 
        __SEG6_LOCAL_ACTION_MAX,
 };
index 26fc60c..904909d 100644 (file)
@@ -290,6 +290,8 @@ enum
        LINUX_MIB_TCPDUPLICATEDATAREHASH,       /* TCPDuplicateDataRehash */
        LINUX_MIB_TCPDSACKRECVSEGS,             /* TCPDSACKRecvSegs */
        LINUX_MIB_TCPDSACKIGNOREDDUBIOUS,       /* TCPDSACKIgnoredDubious */
+       LINUX_MIB_TCPMIGRATEREQSUCCESS,         /* TCPMigrateReqSuccess */
+       LINUX_MIB_TCPMIGRATEREQFAILURE,         /* TCPMigrateReqFailure */
        __LINUX_MIB_MAX
 };
 
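The new MIB entries surface as TCPMigrateReqSuccess and TCPMigrateReqFailure in the TcpExt section of /proc/net/netstat (a name row followed by a value row). A trivial dump of those rows:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/net/netstat", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "TcpExt:", 7))
			fputs(line, stdout);	/* first match names the fields,
						 * the second carries the values */
	fclose(f);
	return 0;
}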
index eb01e12..e9c42a1 100644 (file)
@@ -1537,7 +1537,7 @@ static noinline void __init kernel_init_freeable(void)
         */
        set_mems_allowed(node_states[N_MEMORY]);
 
-       cad_pid = task_pid(current);
+       cad_pid = get_pid(task_pid(current));
 
        smp_prepare_cpus(setup_max_cpus);
 
index b7d51fc..e04e338 100644 (file)
@@ -6485,6 +6485,27 @@ struct bpf_sanitize_info {
        bool mask_to_left;
 };
 
+static struct bpf_verifier_state *
+sanitize_speculative_path(struct bpf_verifier_env *env,
+                         const struct bpf_insn *insn,
+                         u32 next_idx, u32 curr_idx)
+{
+       struct bpf_verifier_state *branch;
+       struct bpf_reg_state *regs;
+
+       branch = push_stack(env, next_idx, curr_idx, true);
+       if (branch && insn) {
+               regs = branch->frame[branch->curframe]->regs;
+               if (BPF_SRC(insn->code) == BPF_K) {
+                       mark_reg_unknown(env, regs, insn->dst_reg);
+               } else if (BPF_SRC(insn->code) == BPF_X) {
+                       mark_reg_unknown(env, regs, insn->dst_reg);
+                       mark_reg_unknown(env, regs, insn->src_reg);
+               }
+       }
+       return branch;
+}
+
 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
                            struct bpf_insn *insn,
                            const struct bpf_reg_state *ptr_reg,
@@ -6568,12 +6589,26 @@ do_sim:
                tmp = *dst_reg;
                *dst_reg = *ptr_reg;
        }
-       ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
+       ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
+                                       env->insn_idx);
        if (!ptr_is_dst_reg && ret)
                *dst_reg = tmp;
        return !ret ? REASON_STACK : 0;
 }
 
+static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
+{
+       struct bpf_verifier_state *vstate = env->cur_state;
+
+       /* If we simulate paths under speculation, we don't update the
+        * insn as 'seen' such that when we verify unreachable paths in
+        * the non-speculative domain, sanitize_dead_code() can still
+        * rewrite/sanitize them.
+        */
+       if (!vstate->speculative)
+               env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+}
+
 static int sanitize_err(struct bpf_verifier_env *env,
                        const struct bpf_insn *insn, int reason,
                        const struct bpf_reg_state *off_reg,
@@ -8752,14 +8787,28 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
                if (err)
                        return err;
        }
+
        if (pred == 1) {
-               /* only follow the goto, ignore fall-through */
+               /* Only follow the goto, ignore fall-through. If needed, push
+                * the fall-through branch for simulation under speculative
+                * execution.
+                */
+               if (!env->bypass_spec_v1 &&
+                   !sanitize_speculative_path(env, insn, *insn_idx + 1,
+                                              *insn_idx))
+                       return -EFAULT;
                *insn_idx += insn->off;
                return 0;
        } else if (pred == 0) {
-               /* only follow fall-through branch, since
-                * that's where the program will go
+               /* Only follow the fall-through branch, since that's where the
+                * program will go. If needed, push the goto branch for
+                * simulation under speculative execution.
                 */
+               if (!env->bypass_spec_v1 &&
+                   !sanitize_speculative_path(env, insn,
+                                              *insn_idx + insn->off + 1,
+                                              *insn_idx))
+                       return -EFAULT;
                return 0;
        }
 
@@ -10621,7 +10670,7 @@ static int do_check(struct bpf_verifier_env *env)
                }
 
                regs = cur_regs(env);
-               env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+               sanitize_mark_insn_seen(env);
                prev_insn_idx = env->insn_idx;
 
                if (class == BPF_ALU || class == BPF_ALU64) {
@@ -10848,7 +10897,7 @@ process_bpf_exit:
                                        return err;
 
                                env->insn_idx++;
-                               env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
+                               sanitize_mark_insn_seen(env);
                        } else {
                                verbose(env, "invalid BPF_LD mode\n");
                                return -EINVAL;
@@ -11381,6 +11430,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
 {
        struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
        struct bpf_insn *insn = new_prog->insnsi;
+       u32 old_seen = old_data[off].seen;
        u32 prog_len;
        int i;
 
@@ -11401,7 +11451,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
        memcpy(new_data + off + cnt - 1, old_data + off,
               sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
        for (i = off; i < off + cnt - 1; i++) {
-               new_data[i].seen = env->pass_cnt;
+               /* Expand insni[off]'s seen count to the patched range. */
+               new_data[i].seen = old_seen;
                new_data[i].zext_dst = insn_has_def32(env, insn + i);
        }
        env->insn_aux_data = new_data;
@@ -12725,6 +12776,9 @@ static void free_states(struct bpf_verifier_env *env)
  * insn_aux_data was touched. These variables are compared to clear temporary
  * data from failed pass. For testing and experiments do_check_common() can be
  * run multiple times even when prior attempt to verify is unsuccessful.
+ *
+ * Note that special handling is needed on !env->bypass_spec_v1 if this is
+ * ever called outside of error path with subsequent program rejection.
  */
 static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
 {
index 8190b6b..1f274d7 100644 (file)
@@ -820,6 +820,10 @@ static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent
        struct cgroup *cgrp = kn->priv;
        int ret;
 
+       /* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
+       if (strchr(new_name_str, '\n'))
+               return -EINVAL;
+
        if (kernfs_type(kn) != KERNFS_DIR)
                return -ENOTDIR;
        if (kn->parent != new_parent)
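The newline check closes a spoofing hole: /proc/<pid>/cgroup is line-oriented, so a '\n' embedded in a group name would let one record masquerade as two. The guard is just the strchr test above; a toy illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* a rename target like this would forge an extra /proc record */
	const char *name = "good\n0::/forged";

	printf("%s\n", strchr(name, '\n') ? "reject (-EINVAL)" : "accept");
	return 0;
}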
index 825284b..684a606 100644 (file)
@@ -464,6 +464,7 @@ static int __init crash_save_vmcoreinfo_init(void)
        VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
        VMCOREINFO_STRUCT_SIZE(mem_section);
        VMCOREINFO_OFFSET(mem_section, section_mem_map);
+       VMCOREINFO_NUMBER(SECTION_SIZE_BITS);
        VMCOREINFO_NUMBER(MAX_PHYSMEM_BITS);
 #endif
        VMCOREINFO_STRUCT_SIZE(page);
index a0b3b04..bf16395 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/highmem.h>
 #include <linux/livepatch.h>
 #include <linux/audit.h>
+#include <linux/tick.h>
 
 #include "common.h"
 
@@ -186,7 +187,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
                local_irq_disable_exit_to_user();
 
                /* Check if any of the above work has queued a deferred wakeup */
-               rcu_nocb_flush_deferred_wakeup();
+               tick_nohz_user_enter_prepare();
 
                ti_work = READ_ONCE(current_thread_info()->flags);
        }
@@ -202,7 +203,7 @@ static void exit_to_user_mode_prepare(struct pt_regs *regs)
        lockdep_assert_irqs_disabled();
 
        /* Flush pending rcuog wakeup before the last need_resched() check */
-       rcu_nocb_flush_deferred_wakeup();
+       tick_nohz_user_enter_prepare();
 
        if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
                ti_work = exit_to_user_mode_loop(regs, ti_work);
index 6fee4a7..fe88d6e 100644 (file)
@@ -4609,7 +4609,9 @@ find_get_context(struct pmu *pmu, struct task_struct *task,
                cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
                ctx = &cpuctx->ctx;
                get_ctx(ctx);
+               raw_spin_lock_irqsave(&ctx->lock, flags);
                ++ctx->pin_count;
+               raw_spin_unlock_irqrestore(&ctx->lock, flags);
 
                return ctx;
        }
index 23a7a0b..db8c248 100644 (file)
@@ -70,9 +70,6 @@ bool irq_work_queue(struct irq_work *work)
        if (!irq_work_claim(work))
                return false;
 
-       /*record irq_work call stack in order to print it in KASAN reports*/
-       kasan_record_aux_stack(work);
-
        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();
        __irq_work_queue_local(work);
index 7a14146..9423218 100644 (file)
@@ -391,6 +391,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
        /* No obstacles. */
        return vprintk_default(fmt, args);
 }
+EXPORT_SYMBOL(vprintk);
 
 void __init printk_safe_init(void)
 {
@@ -411,4 +412,3 @@ void __init printk_safe_init(void)
        /* Flush pending messages that did not have scheduled IRQ works. */
        printk_safe_flush();
 }
-EXPORT_SYMBOL(vprintk);
index 5226cc2..4ca80df 100644 (file)
@@ -6389,7 +6389,6 @@ int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
 {
        return __sched_setscheduler(p, attr, false, true);
 }
-EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
 
 /**
  * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
index 9c882f2..c5aacbd 100644 (file)
@@ -885,6 +885,7 @@ static const struct seq_operations sched_debug_sops = {
 #define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
 #define __P(F) __PS(#F, F)
 #define   P(F) __PS(#F, p->F)
+#define   PM(F, M) __PS(#F, p->F & (M))
 #define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
 #define __PN(F) __PSN(#F, F)
 #define   PN(F) __PSN(#F, p->F)
@@ -1011,7 +1012,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
        P(se.avg.util_avg);
        P(se.avg.last_update_time);
        P(se.avg.util_est.ewma);
-       P(se.avg.util_est.enqueued);
+       PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
 #endif
 #ifdef CONFIG_UCLAMP_TASK
        __PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
index 3248e24..2c8a935 100644 (file)
@@ -3499,10 +3499,9 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
 static inline void
 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-       long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+       long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
        unsigned long load_avg;
        u64 load_sum = 0;
-       s64 delta_sum;
        u32 divider;
 
        if (!runnable_sum)
@@ -3549,13 +3548,13 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
        load_sum = (s64)se_weight(se) * runnable_sum;
        load_avg = div_s64(load_sum, divider);
 
-       delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
-       delta_avg = load_avg - se->avg.load_avg;
+       delta = load_avg - se->avg.load_avg;
 
        se->avg.load_sum = runnable_sum;
        se->avg.load_avg = load_avg;
-       add_positive(&cfs_rq->avg.load_avg, delta_avg);
-       add_positive(&cfs_rq->avg.load_sum, delta_sum);
+
+       add_positive(&cfs_rq->avg.load_avg, delta);
+       cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
 }
 
 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
@@ -3766,11 +3765,17 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
  */
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+       /*
+        * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
+        * See ___update_load_avg() for details.
+        */
+       u32 divider = get_pelt_divider(&cfs_rq->avg);
+
        dequeue_load_avg(cfs_rq, se);
        sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
-       sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
+       cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
        sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
-       sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
+       cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
 
        add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
 
@@ -3902,7 +3907,7 @@ static inline unsigned long _task_util_est(struct task_struct *p)
 {
        struct util_est ue = READ_ONCE(p->se.avg.util_est);
 
-       return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED);
+       return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED));
 }
 
 static inline unsigned long task_util_est(struct task_struct *p)
@@ -4002,7 +4007,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
         * Reset EWMA on utilization increases, the moving average is used only
         * to smooth utilization decreases.
         */
-       ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
+       ue.enqueued = task_util(p);
        if (sched_feat(UTIL_EST_FASTUP)) {
                if (ue.ewma < ue.enqueued) {
                        ue.ewma = ue.enqueued;
@@ -4051,6 +4056,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
        ue.ewma  += last_ewma_diff;
        ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
 done:
+       ue.enqueued |= UTIL_AVG_UNCHANGED;
        WRITE_ONCE(p->se.avg.util_est, ue);
 
        trace_sched_util_est_se_tp(&p->se);
@@ -8030,7 +8036,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
                /* Propagate pending load changes to the parent, if any: */
                se = cfs_rq->tg->se[cpu];
                if (se && !skip_blocked_update(se))
-                       update_load_avg(cfs_rq_of(se), se, 0);
+                       update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
 
                /*
                 * There can be a lot of idle CPU cgroups.  Don't let fully
index 1462846..cfe94ff 100644 (file)
@@ -42,15 +42,6 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
        return LOAD_AVG_MAX - 1024 + avg->period_contrib;
 }
 
-/*
- * When a task is dequeued, its estimated utilization should not be update if
- * its util_avg has not been updated at least once.
- * This flag is used to synchronize util_avg updates with util_est updates.
- * We map this information into the LSB bit of the utilization saved at
- * dequeue time (i.e. util_est.dequeued).
- */
-#define UTIL_AVG_UNCHANGED 0x1
-
 static inline void cfs_se_util_change(struct sched_avg *avg)
 {
        unsigned int enqueued;
@@ -58,7 +49,7 @@ static inline void cfs_se_util_change(struct sched_avg *avg)
        if (!sched_feat(UTIL_EST))
                return;
 
-       /* Avoid store if the flag has been already set */
+       /* Avoid store if the flag has been already reset */
        enqueued = avg->util_est.enqueued;
        if (!(enqueued & UTIL_AVG_UNCHANGED))
                return;
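
The flag is kept in the LSB of util_est.enqueued: readers mask it off, and it is set again only once an update completes, as the fair.c hunks above now do consistently. The tag-in-LSB pattern in isolation:

#include <assert.h>
#include <stdint.h>

#define UNCHANGED 0x1u			/* stand-in for UTIL_AVG_UNCHANGED */

int main(void)
{
	uint32_t stored = 512 | UNCHANGED;	/* writer tags the stored value */

	assert((stored & ~UNCHANGED) == 512);	/* readers mask the tag out */
	stored &= ~UNCHANGED;			/* a util_avg update clears it */
	assert(!(stored & UNCHANGED));
	return 0;
}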
index 6ecd3f3..9f58049 100644 (file)
@@ -1105,28 +1105,30 @@ static int seccomp_do_user_notification(int this_syscall,
 
        up(&match->notif->request);
        wake_up_poll(&match->wqh, EPOLLIN | EPOLLRDNORM);
-       mutex_unlock(&match->notify_lock);
 
        /*
         * This is where we wait for a reply from userspace.
         */
-wait:
-       err = wait_for_completion_interruptible(&n.ready);
-       mutex_lock(&match->notify_lock);
-       if (err == 0) {
-               /* Check if we were woken up by a addfd message */
+       do {
+               mutex_unlock(&match->notify_lock);
+               err = wait_for_completion_interruptible(&n.ready);
+               mutex_lock(&match->notify_lock);
+               if (err != 0)
+                       goto interrupted;
+
                addfd = list_first_entry_or_null(&n.addfd,
                                                 struct seccomp_kaddfd, list);
-               if (addfd && n.state != SECCOMP_NOTIFY_REPLIED) {
+               /* Check if we were woken up by an addfd message */
+               if (addfd)
                        seccomp_handle_addfd(addfd);
-                       mutex_unlock(&match->notify_lock);
-                       goto wait;
-               }
-               ret = n.val;
-               err = n.error;
-               flags = n.flags;
-       }
 
+       } while (n.state != SECCOMP_NOTIFY_REPLIED);
+
+       ret = n.val;
+       err = n.error;
+       flags = n.flags;
+
+interrupted:
        /* If there were any pending addfd calls, clear them out */
        list_for_each_entry_safe(addfd, tmp, &n.addfd, list) {
                /* The process went away before we got a chance to handle it */
index 828b091..6784f27 100644 (file)
@@ -230,6 +230,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 
 #ifdef CONFIG_NO_HZ_FULL
 cpumask_var_t tick_nohz_full_mask;
+EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
 bool tick_nohz_full_running;
 EXPORT_SYMBOL_GPL(tick_nohz_full_running);
 static atomic_t tick_dep_mask;
index 2e8a3fd..72ef4dc 100644 (file)
@@ -1967,12 +1967,18 @@ static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
 
 static void print_ip_ins(const char *fmt, const unsigned char *p)
 {
+       char ins[MCOUNT_INSN_SIZE];
        int i;
 
+       if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
+               printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
+               return;
+       }
+
        printk(KERN_CONT "%s", fmt);
 
        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
-               printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
+               printk(KERN_CONT "%s%02x", i ? ":" : "", ins[i]);
 }
 
 enum ftrace_bug_type ftrace_bug_type;
index a21ef9c..d23a09d 100644 (file)
@@ -2198,9 +2198,6 @@ struct saved_cmdlines_buffer {
 };
 static struct saved_cmdlines_buffer *savedcmd;
 
-/* temporary disable recording */
-static atomic_t trace_record_taskinfo_disabled __read_mostly;
-
 static inline char *get_saved_cmdlines(int idx)
 {
        return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
@@ -2486,8 +2483,6 @@ static bool tracing_record_taskinfo_skip(int flags)
 {
        if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
                return true;
-       if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
-               return true;
        if (!__this_cpu_read(trace_taskinfo_save))
                return true;
        return false;
@@ -2736,7 +2731,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
            (entry = this_cpu_read(trace_buffered_event))) {
                /* Try to use the per cpu buffer first */
                val = this_cpu_inc_return(trace_buffered_event_cnt);
-               if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
+               if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
                        trace_event_setup(entry, type, trace_ctx);
                        entry->array[0] = len;
                        return entry;
@@ -3998,9 +3993,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
                return ERR_PTR(-EBUSY);
 #endif
 
-       if (!iter->snapshot)
-               atomic_inc(&trace_record_taskinfo_disabled);
-
        if (*pos != iter->pos) {
                iter->ent = NULL;
                iter->cpu = 0;
@@ -4043,9 +4035,6 @@ static void s_stop(struct seq_file *m, void *p)
                return;
 #endif
 
-       if (!iter->snapshot)
-               atomic_dec(&trace_record_taskinfo_disabled);
-
        trace_access_unlock(iter->cpu_file);
        trace_event_read_unlock();
 }
index c1637f9..4702efb 100644 (file)
@@ -115,9 +115,9 @@ u64 notrace trace_clock_global(void)
        prev_time = READ_ONCE(trace_clock_struct.prev_time);
        now = sched_clock_cpu(this_cpu);
 
-       /* Make sure that now is always greater than prev_time */
+       /* Make sure that now is always greater than or equal to prev_time */
        if ((s64)(now - prev_time) < 0)
-               now = prev_time + 1;
+               now = prev_time;
 
        /*
         * If in an NMI context then dont risk lockups and simply return
@@ -131,7 +131,7 @@ u64 notrace trace_clock_global(void)
                /* Reread prev_time in case it was already updated */
                prev_time = READ_ONCE(trace_clock_struct.prev_time);
                if ((s64)(now - prev_time) < 0)
-                       now = prev_time + 1;
+                       now = prev_time;
 
                trace_clock_struct.prev_time = now;
 
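The clamp relies on a signed difference so the comparison stays correct across u64 wraparound; the fix only drops the artificial +1 that made the global clock creep ahead of the per-CPU clocks. The idiom in isolation:

#include <assert.h>
#include <stdint.h>

/* wrap-safe clamp as used above: never let now run behind prev_time */
static uint64_t clamp_global(uint64_t now, uint64_t prev_time)
{
	if ((int64_t)(now - prev_time) < 0)
		return prev_time;	/* was prev_time + 1 before this fix */
	return now;
}

int main(void)
{
	assert(clamp_global(100, 200) == 200);
	assert(clamp_global(300, 200) == 300);
	/* near wrap: now just past 0 is still newer than prev near UINT64_MAX */
	assert(clamp_global(5, UINT64_MAX - 5) == 5);
	return 0;
}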
index 47cfa05..9f852a8 100644 (file)
@@ -37,7 +37,7 @@ MODULE_LICENSE("GPL v2");
 /**
  * crc64_be - Calculate bitwise big-endian ECMA-182 CRC64
  * @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation,
-       or the previous crc64 value if computing incrementally.
+ *       or the previous crc64 value if computing incrementally.
  * @p: pointer to buffer over which CRC64 is run
  * @len: length of buffer @p
  */
index a1071cd..af93021 100644 (file)
@@ -275,7 +275,7 @@ static void __percpu_ref_switch_mode(struct percpu_ref *ref,
        wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
                            percpu_ref_switch_lock);
 
-       if (data->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+       if (data->force_atomic || percpu_ref_is_dying(ref))
                __percpu_ref_switch_to_atomic(ref, confirm_switch);
        else
                __percpu_ref_switch_to_percpu(ref);
@@ -385,7 +385,7 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 
        spin_lock_irqsave(&percpu_ref_switch_lock, flags);
 
-       WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
+       WARN_ONCE(percpu_ref_is_dying(ref),
                  "%s called more than once on %ps!", __func__,
                  ref->data->release);
 
@@ -465,7 +465,7 @@ void percpu_ref_resurrect(struct percpu_ref *ref)
 
        spin_lock_irqsave(&percpu_ref_switch_lock, flags);
 
-       WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
+       WARN_ON_ONCE(!percpu_ref_is_dying(ref));
        WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));
 
        ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
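All three call sites now go through the existing percpu_ref_is_dying() accessor instead of open-coding the flag test; behaviour is unchanged. For reference, the helper is essentially the following (paraphrased from include/linux/percpu-refcount.h):

static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
{
	return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}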
index 05efe98..297d1b3 100644 (file)
@@ -192,7 +192,7 @@ static void __init pmd_advanced_tests(struct mm_struct *mm,
 
        pr_debug("Validating PMD advanced\n");
        /* Align the address wrt HPAGE_PMD_SIZE */
-       vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE;
+       vaddr &= HPAGE_PMD_MASK;
 
        pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 
@@ -330,7 +330,7 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
 
        pr_debug("Validating PUD advanced\n");
        /* Align the address wrt HPAGE_PUD_SIZE */
-       vaddr = (vaddr & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE;
+       vaddr &= HPAGE_PUD_MASK;
 
        set_pud_at(mm, vaddr, pudp, pud);
        pudp_set_wrprotect(mm, vaddr, pudp);
index 63ed6b2..6d2a011 100644 (file)
@@ -62,6 +62,7 @@ static struct shrinker deferred_split_shrinker;
 
 static atomic_t huge_zero_refcount;
 struct page *huge_zero_page __read_mostly;
+unsigned long huge_zero_pfn __read_mostly = ~0UL;
 
 bool transparent_hugepage_enabled(struct vm_area_struct *vma)
 {
@@ -98,6 +99,7 @@ retry:
                __free_pages(zero_page, compound_order(zero_page));
                goto retry;
        }
+       WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
 
        /* We take additional reference here. It will be put back by shrinker */
        atomic_set(&huge_zero_refcount, 2);
@@ -147,6 +149,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
        if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
                struct page *zero_page = xchg(&huge_zero_page, NULL);
                BUG_ON(zero_page == NULL);
+               WRITE_ONCE(huge_zero_pfn, ~0UL);
                __free_pages(zero_page, compound_order(zero_page));
                return HPAGE_PMD_NR;
        }
@@ -2044,7 +2047,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
        count_vm_event(THP_SPLIT_PMD);
 
        if (!vma_is_anonymous(vma)) {
-               _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
+               old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
                /*
                 * We are going to unmap this huge page. So
                 * just go ahead and zap it
@@ -2053,16 +2056,25 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                        zap_deposited_table(mm, pmd);
                if (vma_is_special_huge(vma))
                        return;
-               page = pmd_page(_pmd);
-               if (!PageDirty(page) && pmd_dirty(_pmd))
-                       set_page_dirty(page);
-               if (!PageReferenced(page) && pmd_young(_pmd))
-                       SetPageReferenced(page);
-               page_remove_rmap(page, true);
-               put_page(page);
+               if (unlikely(is_pmd_migration_entry(old_pmd))) {
+                       swp_entry_t entry;
+
+                       entry = pmd_to_swp_entry(old_pmd);
+                       page = migration_entry_to_page(entry);
+               } else {
+                       page = pmd_page(old_pmd);
+                       if (!PageDirty(page) && pmd_dirty(old_pmd))
+                               set_page_dirty(page);
+                       if (!PageReferenced(page) && pmd_young(old_pmd))
+                               SetPageReferenced(page);
+                       page_remove_rmap(page, true);
+                       put_page(page);
+               }
                add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
                return;
-       } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
+       }
+
+       if (is_huge_zero_pmd(*pmd)) {
                /*
                 * FIXME: Do we want to invalidate secondary mmu by calling
                 * mmu_notifier_invalidate_range() see comments below inside
@@ -2338,17 +2350,17 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
 
 static void unmap_page(struct page *page)
 {
-       enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK |
+       enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_SYNC |
                TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
-       bool unmap_success;
 
        VM_BUG_ON_PAGE(!PageHead(page), page);
 
        if (PageAnon(page))
                ttu_flags |= TTU_SPLIT_FREEZE;
 
-       unmap_success = try_to_unmap(page, ttu_flags);
-       VM_BUG_ON_PAGE(!unmap_success, page);
+       try_to_unmap(page, ttu_flags);
+
+       VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
 }
 
 static void remap_page(struct page *page, unsigned int nr)
@@ -2659,7 +2671,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        struct deferred_split *ds_queue = get_deferred_split_queue(head);
        struct anon_vma *anon_vma = NULL;
        struct address_space *mapping = NULL;
-       int count, mapcount, extra_pins, ret;
+       int extra_pins, ret;
        pgoff_t end;
 
        VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
@@ -2718,7 +2730,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        }
 
        unmap_page(head);
-       VM_BUG_ON_PAGE(compound_mapcount(head), head);
 
        /* block interrupt reentry in xa_lock and spinlock */
        local_irq_disable();
@@ -2736,9 +2747,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 
        /* Prevent deferred_split_scan() touching ->_refcount */
        spin_lock(&ds_queue->split_queue_lock);
-       count = page_count(head);
-       mapcount = total_mapcount(head);
-       if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
+       if (page_ref_freeze(head, 1 + extra_pins)) {
                if (!list_empty(page_deferred_list(head))) {
                        ds_queue->split_queue_len--;
                        list_del(page_deferred_list(head));
@@ -2758,16 +2767,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                __split_huge_page(page, list, end);
                ret = 0;
        } else {
-               if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
-                       pr_alert("total_mapcount: %u, page_count(): %u\n",
-                                       mapcount, count);
-                       if (PageTail(page))
-                               dump_page(head, NULL);
-                       dump_page(page, "total_mapcount(head) > 0");
-                       BUG();
-               }
                spin_unlock(&ds_queue->split_queue_lock);
-fail:          if (mapping)
+fail:
+               if (mapping)
                        xa_unlock(&mapping->i_pages);
                local_irq_enable();
                remap_page(head, thp_nr_pages(head));
index 95918f4..e0a5f9c 100644 (file)
@@ -1793,7 +1793,7 @@ retry:
                        SetPageHWPoison(page);
                        ClearPageHWPoison(head);
                }
-               remove_hugetlb_page(h, page, false);
+               remove_hugetlb_page(h, head, false);
                h->max_huge_pages--;
                spin_unlock_irq(&hugetlb_lock);
                update_and_free_page(h, head);
@@ -2121,12 +2121,18 @@ out:
  * be restored when a newly allocated huge page must be freed.  It is
  * to be called after calling vma_needs_reservation to determine if a
  * reservation exists.
+ *
+ * vma_del_reservation is used in error paths where an entry in the reserve
+ * map was created during huge page allocation and must be removed.  It is to
+ * be called after calling vma_needs_reservation to determine if a reservation
+ * exists.
  */
 enum vma_resv_mode {
        VMA_NEEDS_RESV,
        VMA_COMMIT_RESV,
        VMA_END_RESV,
        VMA_ADD_RESV,
+       VMA_DEL_RESV,
 };
 static long __vma_reservation_common(struct hstate *h,
                                struct vm_area_struct *vma, unsigned long addr,
@@ -2170,11 +2176,21 @@ static long __vma_reservation_common(struct hstate *h,
                        ret = region_del(resv, idx, idx + 1);
                }
                break;
+       case VMA_DEL_RESV:
+               if (vma->vm_flags & VM_MAYSHARE) {
+                       region_abort(resv, idx, idx + 1, 1);
+                       ret = region_del(resv, idx, idx + 1);
+               } else {
+                       ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
+                       /* region_add calls of range 1 should never fail. */
+                       VM_BUG_ON(ret < 0);
+               }
+               break;
        default:
                BUG();
        }
 
-       if (vma->vm_flags & VM_MAYSHARE)
+       if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
                return ret;
        /*
         * We know private mapping must have HPAGE_RESV_OWNER set.
@@ -2222,25 +2238,39 @@ static long vma_add_reservation(struct hstate *h,
        return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
 }
 
+static long vma_del_reservation(struct hstate *h,
+                       struct vm_area_struct *vma, unsigned long addr)
+{
+       return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
+}
+
 /*
- * This routine is called to restore a reservation on error paths.  In the
- * specific error paths, a huge page was allocated (via alloc_huge_page)
- * and is about to be freed.  If a reservation for the page existed,
- * alloc_huge_page would have consumed the reservation and set
- * HPageRestoreReserve in the newly allocated page.  When the page is freed
- * via free_huge_page, the global reservation count will be incremented if
- * HPageRestoreReserve is set.  However, free_huge_page can not adjust the
- * reserve map.  Adjust the reserve map here to be consistent with global
- * reserve count adjustments to be made by free_huge_page.
+ * This routine is called to restore reservation information on error paths.
+ * It should ONLY be called for pages allocated via alloc_huge_page(), and
+ * the hugetlb mutex should remain held when calling this routine.
+ *
+ * It handles two specific cases:
+ * 1) A reservation was in place and the page consumed the reservation.
+ *    HPageRestoreReserve is set in the page.
+ * 2) No reservation was in place for the page, so HPageRestoreReserve is
+ *    not set.  However, alloc_huge_page always updates the reserve map.
+ *
+ * In case 1, free_huge_page later in the error path will increment the
+ * global reserve count.  But, free_huge_page does not have enough context
+ * to adjust the reservation map.  This case deals primarily with private
+ * mappings.  Adjust the reserve map here to be consistent with global
+ * reserve count adjustments to be made by free_huge_page.  Make sure the
+ * reserve map indicates there is a reservation present.
+ *
+ * In case 2, simply undo reserve map modifications done by alloc_huge_page.
  */
-static void restore_reserve_on_error(struct hstate *h,
-                       struct vm_area_struct *vma, unsigned long address,
-                       struct page *page)
+void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
+                       unsigned long address, struct page *page)
 {
-       if (unlikely(HPageRestoreReserve(page))) {
-               long rc = vma_needs_reservation(h, vma, address);
+       long rc = vma_needs_reservation(h, vma, address);
 
-               if (unlikely(rc < 0)) {
+       if (HPageRestoreReserve(page)) {
+               if (unlikely(rc < 0))
                        /*
                         * Rare out of memory condition in reserve map
                         * manipulation.  Clear HPageRestoreReserve so that
@@ -2253,16 +2283,57 @@ static void restore_reserve_on_error(struct hstate *h,
                         * accounting of reserve counts.
                         */
                        ClearHPageRestoreReserve(page);
-               } else if (rc) {
-                       rc = vma_add_reservation(h, vma, address);
-                       if (unlikely(rc < 0))
+               else if (rc)
+                       (void)vma_add_reservation(h, vma, address);
+               else
+                       vma_end_reservation(h, vma, address);
+       } else {
+               if (!rc) {
+                       /*
+                        * This indicates there is an entry in the reserve map
+                        * added by alloc_huge_page.  We know it was added
+                        * before the alloc_huge_page call, otherwise
+                        * HPageRestoreReserve would be set on the page.
+                        * Remove the entry so that a subsequent allocation
+                        * does not consume a reservation.
+                        */
+                       rc = vma_del_reservation(h, vma, address);
+                       if (rc < 0)
                                /*
-                                * See above comment about rare out of
-                                * memory condition.
+                                * VERY rare out of memory condition.  Since
+                                * we can not delete the entry, set
+                                * HPageRestoreReserve so that the reserve
+                                * count will be incremented when the page
+                                * is freed.  This reserve will be consumed
+                                * on a subsequent allocation.
                                 */
-                               ClearHPageRestoreReserve(page);
+                               SetHPageRestoreReserve(page);
+               } else if (rc < 0) {
+                       /*
+                        * Rare out of memory condition from
+                        * vma_needs_reservation call.  Memory allocation is
+                        * only attempted if a new entry is needed.  Therefore,
+                        * this implies there is not an entry in the
+                        * reserve map.
+                        *
+                        * For shared mappings, no entry in the map indicates
+                        * no reservation.  We are done.
+                        */
+                       if (!(vma->vm_flags & VM_MAYSHARE))
+                               /*
+                                * For private mappings, no entry indicates
+                                * a reservation is present.  Since we can
+                                * not add an entry, set SetHPageRestoreReserve
+                                * on the page so reserve count will be
+                                * incremented when freed.  This reserve will
+                                * be consumed on a subsequent allocation.
+                                */
+                               SetHPageRestoreReserve(page);
                } else
-                       vma_end_reservation(h, vma, address);
+                       /*
+                        * No reservation present, do nothing
+                        */
+                        vma_end_reservation(h, vma, address);
        }
 }
 
@@ -4037,6 +4108,8 @@ again:
                                spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
                                entry = huge_ptep_get(src_pte);
                                if (!pte_same(src_pte_old, entry)) {
+                                       restore_reserve_on_error(h, vma, addr,
+                                                               new);
                                        put_page(new);
                                        /* dst_entry won't change as in child */
                                        goto again;
@@ -4889,10 +4962,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                if (!page)
                        goto out;
        } else if (!*pagep) {
-               ret = -ENOMEM;
+               /* If a page already exists, then it's UFFDIO_COPY for
+                * a non-missing case. Return -EEXIST.
+                */
+               if (vm_shared &&
+                   hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
+                       ret = -EEXIST;
+                       goto out;
+               }
+
                page = alloc_huge_page(dst_vma, dst_addr, 0);
-               if (IS_ERR(page))
+               if (IS_ERR(page)) {
+                       ret = -ENOMEM;
                        goto out;
+               }
 
                ret = copy_huge_page_from_user(page,
                                                (const void __user *) src_addr,
@@ -4996,6 +5079,7 @@ out_release_unlock:
        if (vm_shared || is_continue)
                unlock_page(page);
 out_release_nounlock:
+       restore_reserve_on_error(h, dst_vma, dst_addr, page);
        put_page(page);
        goto out;
 }
@@ -5847,6 +5931,21 @@ unlock:
        return ret;
 }
 
+int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
+{
+       int ret = 0;
+
+       *hugetlb = false;
+       spin_lock_irq(&hugetlb_lock);
+       if (PageHeadHuge(page)) {
+               *hugetlb = true;
+               if (HPageFreed(page) || HPageMigratable(page))
+                       ret = get_page_unless_zero(page);
+       }
+       spin_unlock_irq(&hugetlb_lock);
+       return ret;
+}
+
 void putback_active_hugepage(struct page *page)
 {
        spin_lock_irq(&hugetlb_lock);
index 2f11829..e8fdb53 100644 (file)
@@ -384,27 +384,52 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
 /*
- * At what user virtual address is page expected in @vma?
+ * At what user virtual address is page expected in vma?
+ * Returns -EFAULT if all of the page is outside the range of vma.
+ * If page is a compound head, the entire compound page is considered.
  */
 static inline unsigned long
-__vma_address(struct page *page, struct vm_area_struct *vma)
+vma_address(struct page *page, struct vm_area_struct *vma)
 {
-       pgoff_t pgoff = page_to_pgoff(page);
-       return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+       pgoff_t pgoff;
+       unsigned long address;
+
+       VM_BUG_ON_PAGE(PageKsm(page), page);    /* KSM page->index unusable */
+       pgoff = page_to_pgoff(page);
+       if (pgoff >= vma->vm_pgoff) {
+               address = vma->vm_start +
+                       ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+               /* Check for address beyond vma (or wrapped through 0?) */
+               if (address < vma->vm_start || address >= vma->vm_end)
+                       address = -EFAULT;
+       } else if (PageHead(page) &&
+                  pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
+               /* Test above avoids possibility of wrap to 0 on 32-bit */
+               address = vma->vm_start;
+       } else {
+               address = -EFAULT;
+       }
+       return address;
 }
 
+/*
+ * Then at what user virtual address will none of the page be found in vma?
+ * Assumes that vma_address() already returned a good starting address.
+ * If page is a compound head, the entire compound page is considered.
+ */
 static inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
+vma_address_end(struct page *page, struct vm_area_struct *vma)
 {
-       unsigned long start, end;
-
-       start = __vma_address(page, vma);
-       end = start + thp_size(page) - PAGE_SIZE;
-
-       /* page should be within @vma mapping range */
-       VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
-
-       return max(start, vma->vm_start);
+       pgoff_t pgoff;
+       unsigned long address;
+
+       VM_BUG_ON_PAGE(PageKsm(page), page);    /* KSM page->index unusable */
+       pgoff = page_to_pgoff(page) + compound_nr(page);
+       address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+       /* Check for address beyond vma (or wrapped through 0?) */
+       if (address < vma->vm_start || address > vma->vm_end)
+               address = vma->vm_end;
+       return address;
 }
 
 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
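
The rewritten vma_address() reports -EFAULT instead of tripping a VM_BUG_ON when the page falls outside the vma, and lets a compound head that starts before vm_pgoff clamp to vm_start. The core offset arithmetic, pulled out into a runnable toy with made-up numbers:

#include <assert.h>

#define TOY_PAGE_SHIFT 12

/* toy vma_address(): map a file page offset to a user address, returning
 * 0 (standing in for -EFAULT) when the page lies outside the vma.
 */
static unsigned long toy_vma_address(unsigned long pgoff,
				     unsigned long vm_pgoff,
				     unsigned long vm_start,
				     unsigned long vm_end)
{
	unsigned long address;

	if (pgoff < vm_pgoff)
		return 0;
	address = vm_start + ((pgoff - vm_pgoff) << TOY_PAGE_SHIFT);
	if (address < vm_start || address >= vm_end)
		return 0;
	return address;
}

int main(void)
{
	/* a vma mapping file pages [16, 32) at 0x100000 */
	assert(toy_vma_address(16, 16, 0x100000, 0x110000) == 0x100000);
	assert(toy_vma_address(20, 16, 0x100000, 0x110000) == 0x104000);
	assert(toy_vma_address(40, 16, 0x100000, 0x110000) == 0);
	return 0;
}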
index c4605ac..348f31d 100644 (file)
@@ -220,8 +220,8 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
 /**
  * kasan_populate_early_shadow - populate shadow memory region with
  *                               kasan_early_shadow_page
- * @shadow_start - start of the memory range to populate
- * @shadow_end   - end of the memory range to populate
+ * @shadow_start: start of the memory range to populate
+ * @shadow_end: end of the memory range to populate
  */
 int __ref kasan_populate_early_shadow(const void *shadow_start,
                                        const void *shadow_end)
index e18fbbd..4d21ac4 100644 (file)
@@ -627,10 +627,10 @@ static void toggle_allocation_gate(struct work_struct *work)
                 * During low activity with no allocations we might wait a
                 * while; let's avoid the hung task warning.
                 */
-               wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
-                                  sysctl_hung_task_timeout_secs * HZ / 2);
+               wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
+                                       sysctl_hung_task_timeout_secs * HZ / 2);
        } else {
-               wait_event(allocation_wait, atomic_read(&kfence_allocation_gate));
+               wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
        }
 
        /* Disable static key and reset timer. */
index 85ad98c..0143d32 100644 (file)
@@ -949,6 +949,17 @@ static int page_action(struct page_state *ps, struct page *p,
        return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
 }
 
+/*
+ * Return true if a page type of a given page is supported by hwpoison
+ * mechanism (while handling could fail), otherwise false.  This function
+ * does not return true for hugetlb or device memory pages, so it's assumed
+ * to be called only in the context where we never have such pages.
+ */
+static inline bool HWPoisonHandlable(struct page *page)
+{
+       return PageLRU(page) || __PageMovable(page);
+}
+
 /**
  * __get_hwpoison_page() - Get refcount for memory error handling:
  * @page:      raw error page (hit by memory error)
@@ -959,8 +970,22 @@ static int page_action(struct page_state *ps, struct page *p,
 static int __get_hwpoison_page(struct page *page)
 {
        struct page *head = compound_head(page);
+       int ret = 0;
+       bool hugetlb = false;
+
+       ret = get_hwpoison_huge_page(head, &hugetlb);
+       if (hugetlb)
+               return ret;
 
-       if (!PageHuge(head) && PageTransHuge(head)) {
+       /*
+        * This check prevents from calling get_hwpoison_unless_zero()
+        * for any unsupported type of page in order to reduce the risk of
+        * unexpected races caused by taking a page refcount.
+        */
+       if (!HWPoisonHandlable(head))
+               return 0;
+
+       if (PageTransHuge(head)) {
                /*
                 * Non anonymous thp exists only in allocation/free time. We
                 * can't handle such a case correctly, so let's give it up.
@@ -1017,7 +1042,7 @@ try_again:
                        ret = -EIO;
                }
        } else {
-               if (PageHuge(p) || PageLRU(p) || __PageMovable(p)) {
+               if (PageHuge(p) || HWPoisonHandlable(p)) {
                        ret = 1;
                } else {
                        /*
@@ -1527,7 +1552,12 @@ try_again:
                return 0;
        }
 
-       if (!PageTransTail(p) && !PageLRU(p))
+       /*
+        * __munlock_pagevec may clear a writeback page's LRU flag without
+        * page_lock. We need to wait for writeback completion of this page
+        * or it may trigger a vfs BUG while evicting the inode.
+        */
+       if (!PageTransTail(p) && !PageLRU(p) && !PageWriteback(p))
                goto identify_page_state;
 
        /*
index 730daa0..486f4a2 100644 (file)
@@ -1361,7 +1361,18 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                        else if (zap_huge_pmd(tlb, vma, pmd, addr))
                                goto next;
                        /* fall through */
+               } else if (details && details->single_page &&
+                          PageTransCompound(details->single_page) &&
+                          next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
+                       spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
+                       /*
+                        * Take and drop THP pmd lock so that we cannot return
+                        * prematurely, while zap_huge_pmd() has cleared *pmd,
+                        * but not yet decremented compound_mapcount().
+                        */
+                       spin_unlock(ptl);
                }
+
                /*
                 * Here there can be other concurrent MADV_DONTNEED or
                 * trans huge page faults running, and if the pmd is
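The empty lock/unlock pair above is a serialization idiom: taking the pmd lock cannot succeed until a concurrent zap_huge_pmd() holder has dropped it, by which point its mapcount bookkeeping is complete. As a standalone sketch (names illustrative):

/* Wait out any concurrent holder of the pmd lock without doing work
 * under it; returning guarantees the prior critical section finished. */
static void demo_sync_with_pmd_zapper(struct mm_struct *mm, pmd_t *pmd)
{
        spinlock_t *ptl = pmd_lock(mm, pmd);

        spin_unlock(ptl);
}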
@@ -2939,6 +2950,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                }
                flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
                entry = mk_pte(new_page, vma->vm_page_prot);
+               entry = pte_sw_mkyoung(entry);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 
                /*
@@ -3236,6 +3248,36 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
 }
 
 /**
+ * unmap_mapping_page() - Unmap single page from processes.
+ * @page: The locked page to be unmapped.
+ *
+ * Unmap this page from any userspace process which still has it mmapped.
+ * Typically, for efficiency, the range of nearby pages has already been
+ * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
+ * truncation or invalidation holds the lock on a page, it may find that
+ * the page has been remapped again: it then uses unmap_mapping_page()
+ * to finally unmap it.
+ */
+void unmap_mapping_page(struct page *page)
+{
+       struct address_space *mapping = page->mapping;
+       struct zap_details details = { };
+
+       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON(PageTail(page));
+
+       details.check_mapping = mapping;
+       details.first_index = page->index;
+       details.last_index = page->index + thp_nr_pages(page) - 1;
+       details.single_page = page;
+
+       i_mmap_lock_write(mapping);
+       if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
+               unmap_mapping_range_tree(&mapping->i_mmap, &details);
+       i_mmap_unlock_write(mapping);
+}
+
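A hypothetical truncation-style caller, matching the kernel-doc above (the real callers arrive in the mm/truncate.c hunks later in this merge):

/* After (re)locking the page, notice it was remapped in the meantime
 * and unmap just this page rather than rescanning the whole range. */
static void demo_truncate_one(struct page *page)
{
        lock_page(page);
        if (page_mapped(page))
                unmap_mapping_page(page);       /* requires the page lock */
        unlock_page(page);
}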
+/**
  * unmap_mapping_pages() - Unmap pages from processes.
  * @mapping: The address space containing pages to be unmapped.
  * @start: Index of first page to be unmapped.
@@ -3602,6 +3644,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
        __SetPageUptodate(page);
 
        entry = mk_pte(page, vma->vm_page_prot);
+       entry = pte_sw_mkyoung(entry);
        if (vma->vm_flags & VM_WRITE)
                entry = pte_mkwrite(pte_mkdirty(entry));
 
@@ -3786,6 +3829,8 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
 
        if (prefault && arch_wants_old_prefaulted_pte())
                entry = pte_mkold(entry);
+       else
+               entry = pte_sw_mkyoung(entry);
 
        if (write)
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
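pte_sw_mkyoung() lets architectures without a hardware-managed accessed bit pre-set it in software and so skip a needless first fault; everywhere else it must be a no-op. A sketch of the expected default, mirroring the asm-generic pattern:

#ifndef pte_sw_mkyoung
static inline pte_t pte_sw_mkyoung(pte_t pte)
{
        return pte;             /* no-op unless the arch overrides it */
}
#define pte_sw_mkyoung  pte_sw_mkyoung
#endif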
index b234c3f..41ff2c9 100644 (file)
@@ -295,6 +295,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
                goto out;
 
        page = migration_entry_to_page(entry);
+       page = compound_head(page);
 
        /*
         * Once page cache replacement of page migration started, page_count
index aaa1655..d1f5de1 100644 (file)
@@ -9158,6 +9158,8 @@ bool take_page_off_buddy(struct page *page)
                        del_page_from_free_list(page_head, zone, page_order);
                        break_down_buddy_pages(zone, page_head, page, 0,
                                                page_order, migratetype);
+                       if (!is_migrate_isolate(migratetype))
+                               __mod_zone_freepage_state(zone, -1, migratetype);
                        ret = true;
                        break;
                }
index 2cf01d9..e37bd43 100644 (file)
@@ -212,23 +212,34 @@ restart:
                        pvmw->ptl = NULL;
                }
        } else if (!pmd_present(pmde)) {
+               /*
+                * If PVMW_SYNC, take and drop THP pmd lock so that we
+                * cannot return prematurely, while zap_huge_pmd() has
+                * cleared *pmd but not decremented compound_mapcount().
+                */
+               if ((pvmw->flags & PVMW_SYNC) &&
+                   PageTransCompound(pvmw->page)) {
+                       spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
+
+                       spin_unlock(ptl);
+               }
                return false;
        }
        if (!map_pte(pvmw))
                goto next_pte;
        while (1) {
+               unsigned long end;
+
                if (check_pte(pvmw))
                        return true;
 next_pte:
                /* Seek to next pte only makes sense for THP */
                if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
                        return not_found(pvmw);
+               end = vma_address_end(pvmw->page, pvmw->vma);
                do {
                        pvmw->address += PAGE_SIZE;
-                       if (pvmw->address >= pvmw->vma->vm_end ||
-                           pvmw->address >=
-                                       __vma_address(pvmw->page, pvmw->vma) +
-                                       thp_size(pvmw->page))
+                       if (pvmw->address >= end)
                                return not_found(pvmw);
                        /* Did we cross page table boundary? */
                        if (pvmw->address % PMD_SIZE == 0) {
@@ -266,14 +277,10 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
                .vma = vma,
                .flags = PVMW_SYNC,
        };
-       unsigned long start, end;
-
-       start = __vma_address(page, vma);
-       end = start + thp_size(page) - PAGE_SIZE;
 
-       if (unlikely(end < vma->vm_start || start >= vma->vm_end))
+       pvmw.address = vma_address(page, vma);
+       if (pvmw.address == -EFAULT)
                return 0;
-       pvmw.address = max(start, vma->vm_start);
        if (!page_vma_mapped_walk(&pvmw))
                return 0;
        page_vma_mapped_walk_done(&pvmw);
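The contract these callers assume (a sketch, not the exact mm/internal.h code): vma_address() yields the page's virtual address within the vma, or -EFAULT when the page, including THP tails, falls wholly outside it; vma_address_end() yields the first byte past the page's mapping, clamped to vm_end. A trivial illustrative use:

static inline bool demo_page_in_vma(struct page *page,
                                    struct vm_area_struct *vma)
{
        return vma_address(page, vma) != -EFAULT;
}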
index c2210e1..4e640ba 100644 (file)
@@ -135,9 +135,8 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
 {
        pmd_t pmd;
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-       VM_BUG_ON(!pmd_present(*pmdp));
-       /* Below assumes pmd_present() is true */
-       VM_BUG_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
+       VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
+                          !pmd_devmap(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
index 693a610..e05c300 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -707,7 +707,6 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
-       unsigned long address;
        if (PageAnon(page)) {
                struct anon_vma *page__anon_vma = page_anon_vma(page);
                /*
@@ -717,15 +716,13 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
                if (!vma->anon_vma || !page__anon_vma ||
                    vma->anon_vma->root != page__anon_vma->root)
                        return -EFAULT;
-       } else if (page->mapping) {
-               if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
-                       return -EFAULT;
-       } else
+       } else if (!vma->vm_file) {
                return -EFAULT;
-       address = __vma_address(page, vma);
-       if (unlikely(address < vma->vm_start || address >= vma->vm_end))
+       } else if (vma->vm_file->f_mapping != compound_head(page)->mapping) {
                return -EFAULT;
-       return address;
+       }
+
+       return vma_address(page, vma);
 }
 
 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
@@ -919,7 +916,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
         */
        mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
                                0, vma, vma->vm_mm, address,
-                               min(vma->vm_end, address + page_size(page)));
+                               vma_address_end(page, vma));
        mmu_notifier_invalidate_range_start(&range);
 
        while (page_vma_mapped_walk(&pvmw)) {
@@ -1405,6 +1402,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        struct mmu_notifier_range range;
        enum ttu_flags flags = (enum ttu_flags)(long)arg;
 
+       /*
+        * When racing against e.g. zap_pte_range() on another cpu,
+        * in between its ptep_get_and_clear_full() and page_remove_rmap(),
+        * try_to_unmap() may return false when it is about to become true,
+        * if page table locking is skipped: use TTU_SYNC to wait for that.
+        */
+       if (flags & TTU_SYNC)
+               pvmw.flags = PVMW_SYNC;
+
        /* munlock has nothing to gain from examining un-locked vmas */
        if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
                return true;
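A hypothetical caller sketch: a path that must be able to trust a false return, such as a THP split, passes TTU_SYNC so the rmap walk serializes on the page table lock instead of racing past a concurrent zap:

static bool demo_unmap_strict(struct page *page)
{
        /* TTU_SYNC: wait for, rather than race with, a parallel zap */
        return try_to_unmap(page, TTU_SYNC | TTU_IGNORE_MLOCK);
}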
@@ -1426,9 +1432,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
         * Note that the page can not be free in this function as call of
         * try_to_unmap() must hold a reference on the page.
         */
+       range.end = PageKsm(page) ?
+                       address + PAGE_SIZE : vma_address_end(page, vma);
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
-                               address,
-                               min(vma->vm_end, address + page_size(page)));
+                               address, range.end);
        if (PageHuge(page)) {
                /*
                 * If sharing is possible, start and end will be adjusted
@@ -1777,7 +1784,13 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
        else
                rmap_walk(page, &rwc);
 
-       return !page_mapcount(page) ? true : false;
+       /*
+        * When racing against e.g. zap_pte_range() on another cpu,
+        * in between its ptep_get_and_clear_full() and page_remove_rmap(),
+        * try_to_unmap() may return false when it is about to become true,
+        * if page table locking is skipped: use TTU_SYNC to wait for that.
+        */
+       return !page_mapcount(page);
 }
 
 /**
@@ -1874,6 +1887,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
                struct vm_area_struct *vma = avc->vma;
                unsigned long address = vma_address(page, vma);
 
+               VM_BUG_ON_VMA(address == -EFAULT, vma);
                cond_resched();
 
                if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
@@ -1928,6 +1942,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
                        pgoff_start, pgoff_end) {
                unsigned long address = vma_address(page, vma);
 
+               VM_BUG_ON_VMA(address == -EFAULT, vma);
                cond_resched();
 
                if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
index a4a5714..7cab776 100644 (file)
@@ -97,8 +97,7 @@ EXPORT_SYMBOL(kmem_cache_size);
 #ifdef CONFIG_DEBUG_VM
 static int kmem_cache_sanity_check(const char *name, unsigned int size)
 {
-       if (!name || in_interrupt() || size < sizeof(void *) ||
-               size > KMALLOC_MAX_SIZE) {
+       if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
                pr_err("kmem_cache_create(%s) integrity check failed\n", name);
                return -EINVAL;
        }
index 3f96e09..61bd40e 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/bit_spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/swab.h>
 #include <linux/bitops.h>
 #include <linux/slab.h>
 #include "slab.h"
@@ -712,15 +713,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
               p, p - addr, get_freepointer(s, p));
 
        if (s->flags & SLAB_RED_ZONE)
-               print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
+               print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
                              s->red_left_pad);
        else if (p > addr + 16)
                print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
 
-       print_section(KERN_ERR, "Object ", p,
+       print_section(KERN_ERR,         "Object   ", p,
                      min_t(unsigned int, s->object_size, PAGE_SIZE));
        if (s->flags & SLAB_RED_ZONE)
-               print_section(KERN_ERR, "Redzone ", p + s->object_size,
+               print_section(KERN_ERR, "Redzone  ", p + s->object_size,
                        s->inuse - s->object_size);
 
        off = get_info_end(s);
@@ -732,7 +733,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 
        if (off != size_from_object(s))
                /* Beginning of the filler is the free pointer */
-               print_section(KERN_ERR, "Padding ", p + off,
+               print_section(KERN_ERR, "Padding  ", p + off,
                              size_from_object(s) - off);
 
        dump_stack();
@@ -909,11 +910,11 @@ static int check_object(struct kmem_cache *s, struct page *page,
        u8 *endobject = object + s->object_size;
 
        if (s->flags & SLAB_RED_ZONE) {
-               if (!check_bytes_and_report(s, page, object, "Redzone",
+               if (!check_bytes_and_report(s, page, object, "Left Redzone",
                        object - s->red_left_pad, val, s->red_left_pad))
                        return 0;
 
-               if (!check_bytes_and_report(s, page, object, "Redzone",
+               if (!check_bytes_and_report(s, page, object, "Right Redzone",
                        endobject, val, s->inuse - s->object_size))
                        return 0;
        } else {
@@ -928,7 +929,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
                if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
                        (!check_bytes_and_report(s, page, p, "Poison", p,
                                        POISON_FREE, s->object_size - 1) ||
-                        !check_bytes_and_report(s, page, p, "Poison",
+                        !check_bytes_and_report(s, page, p, "End Poison",
                                p + s->object_size - 1, POISON_END, 1)))
                        return 0;
                /*
@@ -3689,7 +3690,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
        slab_flags_t flags = s->flags;
        unsigned int size = s->object_size;
-       unsigned int freepointer_area;
        unsigned int order;
 
        /*
@@ -3698,13 +3698,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
         * the possible location of the free pointer.
         */
        size = ALIGN(size, sizeof(void *));
-       /*
-        * This is the area of the object where a freepointer can be
-        * safely written. If redzoning adds more to the inuse size, we
-        * can't use that portion for writing the freepointer, so
-        * s->offset must be limited within this for the general case.
-        */
-       freepointer_area = size;
 
 #ifdef CONFIG_SLUB_DEBUG
        /*
@@ -3730,19 +3723,21 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 
        /*
         * With that we have determined the number of bytes in actual use
-        * by the object. This is the potential offset to the free pointer.
+        * by the object and redzoning.
         */
        s->inuse = size;
 
-       if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
-               s->ctor)) {
+       if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
+           ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
+           s->ctor) {
                /*
                 * Relocate free pointer after the object if it is not
                 * permitted to overwrite the first word of the object on
                 * kmem_cache_free.
                 *
                 * This is the case if we do RCU, have a constructor or
-                * destructor or are poisoning the objects.
+                * destructor, are poisoning the objects, or are
+                * redzoning an object smaller than sizeof(void *).
                 *
                 * The assumption that s->offset >= s->inuse means free
                 * pointer is outside of the object is used in the
@@ -3751,13 +3746,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
                 */
                s->offset = size;
                size += sizeof(void *);
-       } else if (freepointer_area > sizeof(void *)) {
+       } else {
                /*
                 * Store freelist pointer near middle of object to keep
                 * it away from the edges of the object to avoid small
                 * sized over/underflows from neighboring allocations.
                 */
-               s->offset = ALIGN(freepointer_area / 2, sizeof(void *));
+               s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
        }
 
 #ifdef CONFIG_SLUB_DEBUG
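A worked instance of the midpoint placement above (illustrative): for a 24-byte object on a 64-bit kernel, ALIGN_DOWN(24 / 2, sizeof(void *)) is ALIGN_DOWN(12, 8), i.e. 8, so the freelist pointer sits at offset 8, clear of both object edges:

static unsigned int demo_freeptr_offset(unsigned int object_size)
{
        /* e.g. object_size == 24 -> ALIGN_DOWN(12, 8) == 8 */
        return ALIGN_DOWN(object_size / 2, sizeof(void *));
}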
index b2ada9d..55c18af 100644 (file)
@@ -344,6 +344,15 @@ size_t mem_section_usage_size(void)
        return sizeof(struct mem_section_usage) + usemap_size();
 }
 
+static inline phys_addr_t pgdat_to_phys(struct pglist_data *pgdat)
+{
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+       return __pa_symbol(pgdat);
+#else
+       return __pa(pgdat);
+#endif
+}
+
 #ifdef CONFIG_MEMORY_HOTREMOVE
 static struct mem_section_usage * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
@@ -362,7 +371,7 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
-       goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
+       goal = pgdat_to_phys(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
        limit = goal + (1UL << PA_SECTION_SHIFT);
        nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
 again:
@@ -390,7 +399,7 @@ static void __init check_usemap_section_nr(int nid,
        }
 
        usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
-       pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
+       pgdat_snr = pfn_to_section_nr(pgdat_to_phys(pgdat) >> PAGE_SHIFT);
        if (usemap_snr == pgdat_snr)
                return;
 
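The helper matters because with a flat memory model the node data is the statically allocated contig_page_data, and kernel-image symbols must be translated with __pa_symbol(); plain __pa() on such a symbol is wrong on e.g. arm64. A minimal sketch of a caller:

static phys_addr_t demo_node0_phys(void)
{
        /* resolves to __pa_symbol(&contig_page_data) on flat builds */
        return pgdat_to_phys(NODE_DATA(0));
}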
index 149e774..996afa8 100644 (file)
@@ -1900,7 +1900,7 @@ unsigned int count_swap_pages(int type, int free)
 
 static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
 {
-       return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
+       return pte_same(pte_swp_clear_flags(pte), swp_pte);
 }
 
 /*
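The rename suggests pte_swp_clear_flags() strips every auxiliary swap-pte bit rather than soft-dirty alone. A sketch consistent with that reading, assuming soft-dirty and uffd-wp are the bits in play (the demo_ name is illustrative):

static inline pte_t demo_pte_swp_clear_flags(pte_t pte)
{
        if (pte_swp_soft_dirty(pte))
                pte = pte_swp_clear_soft_dirty(pte);
        if (pte_swp_uffd_wp(pte))
                pte = pte_swp_clear_uffd_wp(pte);
        return pte;
}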
index 95af244..234ddd8 100644 (file)
@@ -167,13 +167,10 @@ void do_invalidatepage(struct page *page, unsigned int offset,
  * its lock, b) when a concurrent invalidate_mapping_pages got there first and
  * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
  */
-static void
-truncate_cleanup_page(struct address_space *mapping, struct page *page)
+static void truncate_cleanup_page(struct page *page)
 {
-       if (page_mapped(page)) {
-               unsigned int nr = thp_nr_pages(page);
-               unmap_mapping_pages(mapping, page->index, nr, false);
-       }
+       if (page_mapped(page))
+               unmap_mapping_page(page);
 
        if (page_has_private(page))
                do_invalidatepage(page, 0, thp_size(page));
@@ -218,7 +215,7 @@ int truncate_inode_page(struct address_space *mapping, struct page *page)
        if (page->mapping != mapping)
                return -EIO;
 
-       truncate_cleanup_page(mapping, page);
+       truncate_cleanup_page(page);
        delete_from_page_cache(page);
        return 0;
 }
@@ -325,7 +322,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                index = indices[pagevec_count(&pvec) - 1] + 1;
                truncate_exceptional_pvec_entries(mapping, &pvec, indices);
                for (i = 0; i < pagevec_count(&pvec); i++)
-                       truncate_cleanup_page(mapping, pvec.pages[i]);
+                       truncate_cleanup_page(pvec.pages[i]);
                delete_from_page_cache_batch(mapping, &pvec);
                for (i = 0; i < pagevec_count(&pvec); i++)
                        unlock_page(pvec.pages[i]);
@@ -639,6 +636,16 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                                continue;
                        }
 
+                       if (!did_range_unmap && page_mapped(page)) {
+                               /*
+                                * If page is mapped, before taking its lock,
+                                * zap the rest of the file in one hit.
+                                */
+                               unmap_mapping_pages(mapping, index,
+                                               (1 + end - index), false);
+                               did_range_unmap = 1;
+                       }
+
                        lock_page(page);
                        WARN_ON(page_to_index(page) != index);
                        if (page->mapping != mapping) {
@@ -646,23 +653,11 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                                continue;
                        }
                        wait_on_page_writeback(page);
-                       if (page_mapped(page)) {
-                               if (!did_range_unmap) {
-                                       /*
-                                        * Zap the rest of the file in one hit.
-                                        */
-                                       unmap_mapping_pages(mapping, index,
-                                               (1 + end - index), false);
-                                       did_range_unmap = 1;
-                               } else {
-                                       /*
-                                        * Just zap this page
-                                        */
-                                       unmap_mapping_pages(mapping, index,
-                                                               1, false);
-                               }
-                       }
+
+                       if (page_mapped(page))
+                               unmap_mapping_page(page);
                        BUG_ON(page_mapped(page));
+
                        ret2 = do_launder_page(mapping, page);
                        if (ret2 == 0) {
                                if (!invalidate_complete_page2(mapping, page))
index e3f6ff0..1a705a4 100644 (file)
@@ -108,7 +108,8 @@ static inline netdev_features_t vlan_tnl_features(struct net_device *real_dev)
        netdev_features_t ret;
 
        ret = real_dev->hw_enc_features &
-             (NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO | NETIF_F_GSO_ENCAP_ALL);
+             (NETIF_F_CSUM_MASK | NETIF_F_GSO_SOFTWARE |
+              NETIF_F_GSO_ENCAP_ALL);
 
        if ((ret & NETIF_F_GSO_ENCAP_ALL) && (ret & NETIF_F_CSUM_MASK))
                return (ret & ~NETIF_F_CSUM_MASK) | NETIF_F_HW_CSUM;
index be18af4..c7236da 100644 (file)
@@ -768,7 +768,7 @@ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev,
        if (a && a->status & ATIF_PROBE) {
                a->status |= ATIF_PROBE_FAIL;
                /*
-                * we do not respond to probe or request packets for
+                * we do not respond to probe or request packets of
                 * this address while we are probing this address
                 */
                goto unlock;
index 680def8..1202237 100644 (file)
@@ -409,8 +409,10 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
        if (WARN_ON(!forw_packet->if_outgoing))
                return;
 
-       if (WARN_ON(forw_packet->if_outgoing->soft_iface != soft_iface))
+       if (forw_packet->if_outgoing->soft_iface != soft_iface) {
+               pr_warn("%s: soft interface switch for queued OGM\n", __func__);
                return;
+       }
 
        if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
                return;
index 93144e0..4d93c6c 100644 (file)
@@ -3229,7 +3229,7 @@ static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan)
 {
        struct l2cap_chan *chan;
 
-       bt_dev_dbg(pchan->conn->hcon->hdev, "pchan %p", pchan);
+       BT_DBG("pchan %p", pchan);
 
        chan = l2cap_chan_create();
        if (!chan)
@@ -3250,7 +3250,7 @@ static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan)
         */
        atomic_set(&chan->nesting, L2CAP_NESTING_SMP);
 
-       bt_dev_dbg(pchan->conn->hcon->hdev, "created chan %p", chan);
+       BT_DBG("created chan %p", chan);
 
        return chan;
 }
@@ -3354,7 +3354,7 @@ static void smp_del_chan(struct l2cap_chan *chan)
 {
        struct smp_dev *smp;
 
-       bt_dev_dbg(chan->conn->hcon->hdev, "chan %p", chan);
+       BT_DBG("chan %p", chan);
 
        smp = chan->data;
        if (smp) {
index 001064f..a3c755d 100644 (file)
@@ -142,7 +142,7 @@ static void br_cfm_notify(int event, const struct net_bridge_port *port)
 {
        u32 filter = RTEXT_FILTER_CFM_STATUS;
 
-       return br_info_notify(event, port->br, NULL, filter);
+       br_info_notify(event, port->br, NULL, filter);
 }
 
 static void cc_peer_enable(struct br_cfm_peer_mep *peer_mep)
index 698b797..16f9434 100644 (file)
@@ -727,8 +727,9 @@ static inline size_t fdb_nlmsg_size(void)
 }
 
 static int br_fdb_replay_one(struct notifier_block *nb,
-                            struct net_bridge_fdb_entry *fdb,
-                            struct net_device *dev)
+                            const struct net_bridge_fdb_entry *fdb,
+                            struct net_device *dev, unsigned long action,
+                            const void *ctx)
 {
        struct switchdev_notifier_fdb_info item;
        int err;
@@ -737,17 +738,20 @@ static int br_fdb_replay_one(struct notifier_block *nb,
        item.vid = fdb->key.vlan_id;
        item.added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
        item.offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);
+       item.is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
        item.info.dev = dev;
+       item.info.ctx = ctx;
 
-       err = nb->notifier_call(nb, SWITCHDEV_FDB_ADD_TO_DEVICE, &item);
+       err = nb->notifier_call(nb, action, &item);
        return notifier_to_errno(err);
 }
 
-int br_fdb_replay(struct net_device *br_dev, struct net_device *dev,
-                 struct notifier_block *nb)
+int br_fdb_replay(const struct net_device *br_dev, const struct net_device *dev,
+                 const void *ctx, bool adding, struct notifier_block *nb)
 {
        struct net_bridge_fdb_entry *fdb;
        struct net_bridge *br;
+       unsigned long action;
        int err = 0;
 
        if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
@@ -755,17 +759,22 @@ int br_fdb_replay(struct net_device *br_dev, struct net_device *dev,
 
        br = netdev_priv(br_dev);
 
+       if (adding)
+               action = SWITCHDEV_FDB_ADD_TO_DEVICE;
+       else
+               action = SWITCHDEV_FDB_DEL_TO_DEVICE;
+
        rcu_read_lock();
 
        hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) {
-               struct net_bridge_port *dst = READ_ONCE(fdb->dst);
+               const struct net_bridge_port *dst = READ_ONCE(fdb->dst);
                struct net_device *dst_dev;
 
                dst_dev = dst ? dst->dev : br->dev;
                if (dst_dev != br_dev && dst_dev != dev)
                        continue;
 
-               err = br_fdb_replay_one(nb, fdb, dst_dev);
+               err = br_fdb_replay_one(nb, fdb, dst_dev, action, ctx);
                if (err)
                        break;
        }
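Hypothetical switchdev-driver usage of the extended signature, assuming a driver-private ctx/notifier block and a brport_dev variable: replay with adding=true when the port joins the bridge, and again with adding=false on leave so learned entries are deleted from hardware:

err = br_fdb_replay(br_dev, brport_dev, priv, true, &priv->fdb_nb);
if (err && err != -EOPNOTSUPP)
        return err;
/* ... later, on unoffload/leave: */
br_fdb_replay(br_dev, brport_dev, priv, false, &priv->fdb_nb);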
index 3f839a8..17a720b 100644 (file)
@@ -567,19 +567,21 @@ static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
 }
 
 static int br_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
-                            struct switchdev_obj_port_mdb *mdb,
+                            const struct switchdev_obj_port_mdb *mdb,
+                            unsigned long action, const void *ctx,
                             struct netlink_ext_ack *extack)
 {
        struct switchdev_notifier_port_obj_info obj_info = {
                .info = {
                        .dev = dev,
                        .extack = extack,
+                       .ctx = ctx,
                },
                .obj = &mdb->obj,
        };
        int err;
 
-       err = nb->notifier_call(nb, SWITCHDEV_PORT_OBJ_ADD, &obj_info);
+       err = nb->notifier_call(nb, action, &obj_info);
        return notifier_to_errno(err);
 }
 
@@ -603,11 +605,13 @@ static int br_mdb_queue_one(struct list_head *mdb_list,
 }
 
 int br_mdb_replay(struct net_device *br_dev, struct net_device *dev,
-                 struct notifier_block *nb, struct netlink_ext_ack *extack)
+                 const void *ctx, bool adding, struct notifier_block *nb,
+                 struct netlink_ext_ack *extack)
 {
-       struct net_bridge_mdb_entry *mp;
+       const struct net_bridge_mdb_entry *mp;
        struct switchdev_obj *obj, *tmp;
        struct net_bridge *br;
+       unsigned long action;
        LIST_HEAD(mdb_list);
        int err = 0;
 
@@ -632,8 +636,8 @@ int br_mdb_replay(struct net_device *br_dev, struct net_device *dev,
        rcu_read_lock();
 
        hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
-               struct net_bridge_port_group __rcu **pp;
-               struct net_bridge_port_group *p;
+               struct net_bridge_port_group __rcu * const *pp;
+               const struct net_bridge_port_group *p;
 
                if (mp->host_joined) {
                        err = br_mdb_queue_one(&mdb_list,
@@ -662,9 +666,14 @@ int br_mdb_replay(struct net_device *br_dev, struct net_device *dev,
 
        rcu_read_unlock();
 
+       if (adding)
+               action = SWITCHDEV_PORT_OBJ_ADD;
+       else
+               action = SWITCHDEV_PORT_OBJ_DEL;
+
        list_for_each_entry(obj, &mdb_list, list) {
                err = br_mdb_replay_one(nb, dev, SWITCHDEV_OBJ_PORT_MDB(obj),
-                                       extack);
+                                       action, ctx, extack);
                if (err)
                        goto out_free_mdb;
        }
index ec66113..a684d0c 100644 (file)
@@ -90,8 +90,8 @@ struct bridge_mcast_stats {
 #endif
 
 struct br_tunnel_info {
-       __be64                  tunnel_id;
-       struct metadata_dst     *tunnel_dst;
+       __be64                          tunnel_id;
+       struct metadata_dst __rcu       *tunnel_dst;
 };
 
 /* private vlan flags */
index 3dafb61..1d80f34 100644 (file)
@@ -639,9 +639,9 @@ int br_set_ageing_time(struct net_bridge *br, clock_t ageing_time)
        return 0;
 }
 
-clock_t br_get_ageing_time(struct net_device *br_dev)
+clock_t br_get_ageing_time(const struct net_device *br_dev)
 {
-       struct net_bridge *br;
+       const struct net_bridge *br;
 
        if (!netif_is_bridge_master(br_dev))
                return 0;
index da3256a..a08e9f1 100644 (file)
@@ -113,9 +113,7 @@ static void __vlan_add_list(struct net_bridge_vlan *v)
        headp = &vg->vlan_list;
        list_for_each_prev(hpos, headp) {
                vent = list_entry(hpos, struct net_bridge_vlan, vlist);
-               if (v->vid < vent->vid)
-                       continue;
-               else
+               if (v->vid >= vent->vid)
                        break;
        }
        list_add_rcu(&v->vlist, hpos);
@@ -1809,28 +1807,32 @@ out_kfree:
 static int br_vlan_replay_one(struct notifier_block *nb,
                              struct net_device *dev,
                              struct switchdev_obj_port_vlan *vlan,
+                             const void *ctx, unsigned long action,
                              struct netlink_ext_ack *extack)
 {
        struct switchdev_notifier_port_obj_info obj_info = {
                .info = {
                        .dev = dev,
                        .extack = extack,
+                       .ctx = ctx,
                },
                .obj = &vlan->obj,
        };
        int err;
 
-       err = nb->notifier_call(nb, SWITCHDEV_PORT_OBJ_ADD, &obj_info);
+       err = nb->notifier_call(nb, action, &obj_info);
        return notifier_to_errno(err);
 }
 
 int br_vlan_replay(struct net_device *br_dev, struct net_device *dev,
-                  struct notifier_block *nb, struct netlink_ext_ack *extack)
+                  const void *ctx, bool adding, struct notifier_block *nb,
+                  struct netlink_ext_ack *extack)
 {
        struct net_bridge_vlan_group *vg;
        struct net_bridge_vlan *v;
        struct net_bridge_port *p;
        struct net_bridge *br;
+       unsigned long action;
        int err = 0;
        u16 pvid;
 
@@ -1857,6 +1859,11 @@ int br_vlan_replay(struct net_device *br_dev, struct net_device *dev,
        if (!vg)
                return 0;
 
+       if (adding)
+               action = SWITCHDEV_PORT_OBJ_ADD;
+       else
+               action = SWITCHDEV_PORT_OBJ_DEL;
+
        pvid = br_get_pvid(vg);
 
        list_for_each_entry(v, &vg->vlan_list, vlist) {
@@ -1870,7 +1877,7 @@ int br_vlan_replay(struct net_device *br_dev, struct net_device *dev,
                if (!br_vlan_should_use(v))
                        continue;
 
-               err = br_vlan_replay_one(nb, dev, &vlan, extack);
+               err = br_vlan_replay_one(nb, dev, &vlan, ctx, action, extack);
                if (err)
                        return err;
        }
index 0d3a8c0..0101744 100644 (file)
@@ -41,26 +41,33 @@ static struct net_bridge_vlan *br_vlan_tunnel_lookup(struct rhashtable *tbl,
                                      br_vlan_tunnel_rht_params);
 }
 
+static void vlan_tunnel_info_release(struct net_bridge_vlan *vlan)
+{
+       struct metadata_dst *tdst = rtnl_dereference(vlan->tinfo.tunnel_dst);
+
+       WRITE_ONCE(vlan->tinfo.tunnel_id, 0);
+       RCU_INIT_POINTER(vlan->tinfo.tunnel_dst, NULL);
+       dst_release(&tdst->dst);
+}
+
 void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
                          struct net_bridge_vlan *vlan)
 {
-       if (!vlan->tinfo.tunnel_dst)
+       if (!rcu_access_pointer(vlan->tinfo.tunnel_dst))
                return;
        rhashtable_remove_fast(&vg->tunnel_hash, &vlan->tnode,
                               br_vlan_tunnel_rht_params);
-       vlan->tinfo.tunnel_id = 0;
-       dst_release(&vlan->tinfo.tunnel_dst->dst);
-       vlan->tinfo.tunnel_dst = NULL;
+       vlan_tunnel_info_release(vlan);
 }
 
 static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
                                  struct net_bridge_vlan *vlan, u32 tun_id)
 {
-       struct metadata_dst *metadata = NULL;
+       struct metadata_dst *metadata = rtnl_dereference(vlan->tinfo.tunnel_dst);
        __be64 key = key32_to_tunnel_id(cpu_to_be32(tun_id));
        int err;
 
-       if (vlan->tinfo.tunnel_dst)
+       if (metadata)
                return -EEXIST;
 
        metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY,
@@ -69,8 +76,8 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
                return -EINVAL;
 
        metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_BRIDGE;
-       vlan->tinfo.tunnel_dst = metadata;
-       vlan->tinfo.tunnel_id = key;
+       rcu_assign_pointer(vlan->tinfo.tunnel_dst, metadata);
+       WRITE_ONCE(vlan->tinfo.tunnel_id, key);
 
        err = rhashtable_lookup_insert_fast(&vg->tunnel_hash, &vlan->tnode,
                                            br_vlan_tunnel_rht_params);
@@ -79,9 +86,7 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
 
        return 0;
 out:
-       dst_release(&vlan->tinfo.tunnel_dst->dst);
-       vlan->tinfo.tunnel_dst = NULL;
-       vlan->tinfo.tunnel_id = 0;
+       vlan_tunnel_info_release(vlan);
 
        return err;
 }
@@ -182,12 +187,15 @@ int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
 int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
                                 struct net_bridge_vlan *vlan)
 {
+       struct metadata_dst *tunnel_dst;
+       __be64 tunnel_id;
        int err;
 
-       if (!vlan || !vlan->tinfo.tunnel_id)
+       if (!vlan)
                return 0;
 
-       if (unlikely(!skb_vlan_tag_present(skb)))
+       tunnel_id = READ_ONCE(vlan->tinfo.tunnel_id);
+       if (!tunnel_id || unlikely(!skb_vlan_tag_present(skb)))
                return 0;
 
        skb_dst_drop(skb);
@@ -195,7 +203,9 @@ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
        if (err)
                return err;
 
-       skb_dst_set(skb, dst_clone(&vlan->tinfo.tunnel_dst->dst));
+       tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
+       if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))
+               skb_dst_set(skb, &tunnel_dst->dst);
 
        return 0;
 }
index cac30e6..23267c8 100644 (file)
@@ -480,7 +480,7 @@ got_phyid:
        phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
        if (!phyinfo) {
                res = -ENOMEM;
-               goto out_err;
+               goto out;
        }
 
        phy_layer->id = phyid;
index 909b9e6..f3e4d95 100644 (file)
@@ -125,7 +125,7 @@ struct bcm_sock {
        struct sock sk;
        int bound;
        int ifindex;
-       struct notifier_block notifier;
+       struct list_head notifier;
        struct list_head rx_ops;
        struct list_head tx_ops;
        unsigned long dropped_usr_msgs;
@@ -133,6 +133,10 @@ struct bcm_sock {
        char procname [32]; /* inode number in decimal with \0 */
 };
 
+static LIST_HEAD(bcm_notifier_list);
+static DEFINE_SPINLOCK(bcm_notifier_lock);
+static struct bcm_sock *bcm_busy_notifier;
+
 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
 {
        return (struct bcm_sock *)sk;
@@ -402,6 +406,7 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
                if (!op->count && (op->flags & TX_COUNTEVT)) {
 
                        /* create notification to user */
+                       memset(&msg_head, 0, sizeof(msg_head));
                        msg_head.opcode  = TX_EXPIRED;
                        msg_head.flags   = op->flags;
                        msg_head.count   = op->count;
@@ -439,6 +444,7 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
        /* this element is not throttled anymore */
        data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
 
+       memset(&head, 0, sizeof(head));
        head.opcode  = RX_CHANGED;
        head.flags   = op->flags;
        head.count   = op->count;
@@ -560,6 +566,7 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
        }
 
        /* create notification to user */
+       memset(&msg_head, 0, sizeof(msg_head));
        msg_head.opcode  = RX_TIMEOUT;
        msg_head.flags   = op->flags;
        msg_head.count   = op->count;
@@ -1378,20 +1385,15 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 /*
  * notification handler for netdevice status changes
  */
-static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
-                       void *ptr)
+static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
+                      struct net_device *dev)
 {
-       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-       struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
        struct sock *sk = &bo->sk;
        struct bcm_op *op;
        int notify_enodev = 0;
 
        if (!net_eq(dev_net(dev), sock_net(sk)))
-               return NOTIFY_DONE;
-
-       if (dev->type != ARPHRD_CAN)
-               return NOTIFY_DONE;
+               return;
 
        switch (msg) {
 
@@ -1426,7 +1428,28 @@ static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
                                sk->sk_error_report(sk);
                }
        }
+}
 
+static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
+                       void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+       if (dev->type != ARPHRD_CAN)
+               return NOTIFY_DONE;
+       if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
+               return NOTIFY_DONE;
+       if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
+               return NOTIFY_DONE;
+
+       spin_lock(&bcm_notifier_lock);
+       list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
+               spin_unlock(&bcm_notifier_lock);
+               bcm_notify(bcm_busy_notifier, msg, dev);
+               spin_lock(&bcm_notifier_lock);
+       }
+       bcm_busy_notifier = NULL;
+       spin_unlock(&bcm_notifier_lock);
        return NOTIFY_DONE;
 }
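Stripped to its bones, this change replaces one registered notifier block per socket with a single module-wide notifier walking a spinlock-protected list; the global busy pointer lets release() wait out a callback still running on its socket. A generic sketch of the pattern (all demo_* names, including the per-socket demo_notify() handler, are illustrative):

struct demo_sock {
        struct sock sk;
        struct list_head notifier;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);
static struct demo_sock *demo_busy;     /* socket currently in callback */

static void demo_notify_all(unsigned long msg, struct net_device *dev)
{
        spin_lock(&demo_lock);
        list_for_each_entry(demo_busy, &demo_list, notifier) {
                spin_unlock(&demo_lock);
                demo_notify(demo_busy, msg, dev);  /* per-socket handler */
                spin_lock(&demo_lock);
        }
        demo_busy = NULL;
        spin_unlock(&demo_lock);
}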
 
@@ -1446,9 +1469,9 @@ static int bcm_init(struct sock *sk)
        INIT_LIST_HEAD(&bo->rx_ops);
 
        /* set notifier */
-       bo->notifier.notifier_call = bcm_notifier;
-
-       register_netdevice_notifier(&bo->notifier);
+       spin_lock(&bcm_notifier_lock);
+       list_add_tail(&bo->notifier, &bcm_notifier_list);
+       spin_unlock(&bcm_notifier_lock);
 
        return 0;
 }
@@ -1471,7 +1494,14 @@ static int bcm_release(struct socket *sock)
 
        /* remove bcm_ops, timer, rx_unregister(), etc. */
 
-       unregister_netdevice_notifier(&bo->notifier);
+       spin_lock(&bcm_notifier_lock);
+       while (bcm_busy_notifier == bo) {
+               spin_unlock(&bcm_notifier_lock);
+               schedule_timeout_uninterruptible(1);
+               spin_lock(&bcm_notifier_lock);
+       }
+       list_del(&bo->notifier);
+       spin_unlock(&bcm_notifier_lock);
 
        lock_sock(sk);
 
@@ -1692,6 +1722,10 @@ static struct pernet_operations canbcm_pernet_ops __read_mostly = {
        .exit = canbcm_pernet_exit,
 };
 
+static struct notifier_block canbcm_notifier = {
+       .notifier_call = bcm_notifier
+};
+
 static int __init bcm_module_init(void)
 {
        int err;
@@ -1705,12 +1739,14 @@ static int __init bcm_module_init(void)
        }
 
        register_pernet_subsys(&canbcm_pernet_ops);
+       register_netdevice_notifier(&canbcm_notifier);
        return 0;
 }
 
 static void __exit bcm_module_exit(void)
 {
        can_proto_unregister(&bcm_can_proto);
+       unregister_netdevice_notifier(&canbcm_notifier);
        unregister_pernet_subsys(&canbcm_pernet_ops);
 }
 
index f995eae..bd49299 100644 (file)
@@ -143,10 +143,14 @@ struct isotp_sock {
        u32 force_tx_stmin;
        u32 force_rx_stmin;
        struct tpcon rx, tx;
-       struct notifier_block notifier;
+       struct list_head notifier;
        wait_queue_head_t wait;
 };
 
+static LIST_HEAD(isotp_notifier_list);
+static DEFINE_SPINLOCK(isotp_notifier_lock);
+static struct isotp_sock *isotp_busy_notifier;
+
 static inline struct isotp_sock *isotp_sk(const struct sock *sk)
 {
        return (struct isotp_sock *)sk;
@@ -1015,7 +1019,14 @@ static int isotp_release(struct socket *sock)
        /* wait for complete transmission of current pdu */
        wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
 
-       unregister_netdevice_notifier(&so->notifier);
+       spin_lock(&isotp_notifier_lock);
+       while (isotp_busy_notifier == so) {
+               spin_unlock(&isotp_notifier_lock);
+               schedule_timeout_uninterruptible(1);
+               spin_lock(&isotp_notifier_lock);
+       }
+       list_del(&so->notifier);
+       spin_unlock(&isotp_notifier_lock);
 
        lock_sock(sk);
 
@@ -1319,21 +1330,16 @@ static int isotp_getsockopt(struct socket *sock, int level, int optname,
        return 0;
 }
 
-static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
-                         void *ptr)
+static void isotp_notify(struct isotp_sock *so, unsigned long msg,
+                        struct net_device *dev)
 {
-       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-       struct isotp_sock *so = container_of(nb, struct isotp_sock, notifier);
        struct sock *sk = &so->sk;
 
        if (!net_eq(dev_net(dev), sock_net(sk)))
-               return NOTIFY_DONE;
-
-       if (dev->type != ARPHRD_CAN)
-               return NOTIFY_DONE;
+               return;
 
        if (so->ifindex != dev->ifindex)
-               return NOTIFY_DONE;
+               return;
 
        switch (msg) {
        case NETDEV_UNREGISTER:
@@ -1359,7 +1365,28 @@ static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
                        sk->sk_error_report(sk);
                break;
        }
+}
 
+static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
+                         void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+       if (dev->type != ARPHRD_CAN)
+               return NOTIFY_DONE;
+       if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
+               return NOTIFY_DONE;
+       if (unlikely(isotp_busy_notifier)) /* Check for reentrant bug. */
+               return NOTIFY_DONE;
+
+       spin_lock(&isotp_notifier_lock);
+       list_for_each_entry(isotp_busy_notifier, &isotp_notifier_list, notifier) {
+               spin_unlock(&isotp_notifier_lock);
+               isotp_notify(isotp_busy_notifier, msg, dev);
+               spin_lock(&isotp_notifier_lock);
+       }
+       isotp_busy_notifier = NULL;
+       spin_unlock(&isotp_notifier_lock);
        return NOTIFY_DONE;
 }
 
@@ -1396,8 +1423,9 @@ static int isotp_init(struct sock *sk)
 
        init_waitqueue_head(&so->wait);
 
-       so->notifier.notifier_call = isotp_notifier;
-       register_netdevice_notifier(&so->notifier);
+       spin_lock(&isotp_notifier_lock);
+       list_add_tail(&so->notifier, &isotp_notifier_list);
+       spin_unlock(&isotp_notifier_lock);
 
        return 0;
 }
@@ -1444,6 +1472,10 @@ static const struct can_proto isotp_can_proto = {
        .prot = &isotp_proto,
 };
 
+static struct notifier_block canisotp_notifier = {
+       .notifier_call = isotp_notifier
+};
+
 static __init int isotp_module_init(void)
 {
        int err;
@@ -1453,6 +1485,8 @@ static __init int isotp_module_init(void)
        err = can_proto_register(&isotp_can_proto);
        if (err < 0)
                pr_err("can: registration of isotp protocol failed %pe\n", ERR_PTR(err));
+       else
+               register_netdevice_notifier(&canisotp_notifier);
 
        return err;
 }
@@ -1460,6 +1494,7 @@ static __init int isotp_module_init(void)
 static __exit void isotp_module_exit(void)
 {
        can_proto_unregister(&isotp_can_proto);
+       unregister_netdevice_notifier(&canisotp_notifier);
 }
 
 module_init(isotp_module_init);
index e09d087..c3946c3 100644 (file)
@@ -330,6 +330,9 @@ static void j1939_session_skb_drop_old(struct j1939_session *session)
 
        if ((do_skcb->offset + do_skb->len) < offset_start) {
                __skb_unlink(do_skb, &session->skb_queue);
+               /* drop ref taken in j1939_session_skb_queue() */
+               skb_unref(do_skb);
+
                kfree_skb(do_skb);
        }
        spin_unlock_irqrestore(&session->skb_queue.lock, flags);
@@ -349,12 +352,13 @@ void j1939_session_skb_queue(struct j1939_session *session,
 
        skcb->flags |= J1939_ECU_LOCAL_SRC;
 
+       skb_get(skb);
        skb_queue_tail(&session->skb_queue, skb);
 }
 
 static struct
-sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
-                                         unsigned int offset_start)
+sk_buff *j1939_session_skb_get_by_offset(struct j1939_session *session,
+                                        unsigned int offset_start)
 {
        struct j1939_priv *priv = session->priv;
        struct j1939_sk_buff_cb *do_skcb;
@@ -371,6 +375,10 @@ sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
                        skb = do_skb;
                }
        }
+
+       if (skb)
+               skb_get(skb);
+
        spin_unlock_irqrestore(&session->skb_queue.lock, flags);
 
        if (!skb)
@@ -381,12 +389,12 @@ sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
        return skb;
 }
 
-static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
+static struct sk_buff *j1939_session_skb_get(struct j1939_session *session)
 {
        unsigned int offset_start;
 
        offset_start = session->pkt.dpo * 7;
-       return j1939_session_skb_find_by_offset(session, offset_start);
+       return j1939_session_skb_get_by_offset(session, offset_start);
 }
 
 /* see if we are receiver
@@ -776,7 +784,7 @@ static int j1939_session_tx_dat(struct j1939_session *session)
        int ret = 0;
        u8 dat[8];
 
-       se_skb = j1939_session_skb_find_by_offset(session, session->pkt.tx * 7);
+       se_skb = j1939_session_skb_get_by_offset(session, session->pkt.tx * 7);
        if (!se_skb)
                return -ENOBUFS;
 
@@ -801,7 +809,8 @@ static int j1939_session_tx_dat(struct j1939_session *session)
                        netdev_err_once(priv->ndev,
                                        "%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n",
                                        __func__, session, skcb->offset, se_skb->len , session->pkt.tx);
-                       return -EOVERFLOW;
+                       ret = -EOVERFLOW;
+                       goto out_free;
                }
 
                if (!len) {
@@ -835,6 +844,12 @@ static int j1939_session_tx_dat(struct j1939_session *session)
        if (pkt_done)
                j1939_tp_set_rxtimeout(session, 250);
 
+ out_free:
+       if (ret)
+               kfree_skb(se_skb);
+       else
+               consume_skb(se_skb);
+
        return ret;
 }
 
@@ -1007,7 +1022,7 @@ static int j1939_xtp_txnext_receiver(struct j1939_session *session)
 static int j1939_simple_txnext(struct j1939_session *session)
 {
        struct j1939_priv *priv = session->priv;
-       struct sk_buff *se_skb = j1939_session_skb_find(session);
+       struct sk_buff *se_skb = j1939_session_skb_get(session);
        struct sk_buff *skb;
        int ret;
 
@@ -1015,8 +1030,10 @@ static int j1939_simple_txnext(struct j1939_session *session)
                return 0;
 
        skb = skb_clone(se_skb, GFP_ATOMIC);
-       if (!skb)
-               return -ENOMEM;
+       if (!skb) {
+               ret = -ENOMEM;
+               goto out_free;
+       }
 
        can_skb_set_owner(skb, se_skb->sk);
 
@@ -1024,12 +1041,18 @@ static int j1939_simple_txnext(struct j1939_session *session)
 
        ret = j1939_send_one(priv, skb);
        if (ret)
-               return ret;
+               goto out_free;
 
        j1939_sk_errqueue(session, J1939_ERRQUEUE_SCHED);
        j1939_sk_queue_activate_next(session);
 
-       return 0;
+ out_free:
+       if (ret)
+               kfree_skb(se_skb);
+       else
+               consume_skb(se_skb);
+
+       return ret;
 }
 
 static bool j1939_session_deactivate_locked(struct j1939_session *session)
@@ -1170,9 +1193,10 @@ static void j1939_session_completed(struct j1939_session *session)
        struct sk_buff *skb;
 
        if (!session->transmission) {
-               skb = j1939_session_skb_find(session);
+               skb = j1939_session_skb_get(session);
                /* distribute among j1939 receivers */
                j1939_sk_recv(session->priv, skb);
+               consume_skb(skb);
        }
 
        j1939_session_deactivate_activate_next(session);
@@ -1744,7 +1768,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
 {
        struct j1939_priv *priv = session->priv;
        struct j1939_sk_buff_cb *skcb;
-       struct sk_buff *se_skb;
+       struct sk_buff *se_skb = NULL;
        const u8 *dat;
        u8 *tpdat;
        int offset;
@@ -1786,7 +1810,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
                goto out_session_cancel;
        }
 
-       se_skb = j1939_session_skb_find_by_offset(session, packet * 7);
+       se_skb = j1939_session_skb_get_by_offset(session, packet * 7);
        if (!se_skb) {
                netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__,
                            session);
@@ -1848,11 +1872,13 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
                j1939_tp_set_rxtimeout(session, 250);
        }
        session->last_cmd = 0xff;
+       consume_skb(se_skb);
        j1939_session_put(session);
 
        return;
 
  out_session_cancel:
+       kfree_skb(se_skb);
        j1939_session_timers_cancel(session);
        j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
        j1939_session_put(session);
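The rename from *_skb_find() to *_skb_get() encodes an ownership rule: the returned skb carries an extra reference that the caller must drop, via consume_skb() on success or kfree_skb() on an error path. A sketch (demo_use() is hypothetical):

static int demo_tx_one(struct j1939_session *session)
{
        struct sk_buff *se_skb = j1939_session_skb_get(session);
        int ret;

        if (!se_skb)
                return -ENOBUFS;
        ret = demo_use(se_skb);
        if (ret)
                kfree_skb(se_skb);      /* error path: drop and account */
        else
                consume_skb(se_skb);    /* success: release quietly */
        return ret;
}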
index 139d947..ac96fc2 100644 (file)
@@ -83,7 +83,7 @@ struct raw_sock {
        struct sock sk;
        int bound;
        int ifindex;
-       struct notifier_block notifier;
+       struct list_head notifier;
        int loopback;
        int recv_own_msgs;
        int fd_frames;
@@ -95,6 +95,10 @@ struct raw_sock {
        struct uniqframe __percpu *uniq;
 };
 
+static LIST_HEAD(raw_notifier_list);
+static DEFINE_SPINLOCK(raw_notifier_lock);
+static struct raw_sock *raw_busy_notifier;
+
 /* Return pointer to store the extra msg flags for raw_recvmsg().
  * We use the space of one unsigned int beyond the 'struct sockaddr_can'
  * in skb->cb.
@@ -263,21 +267,16 @@ static int raw_enable_allfilters(struct net *net, struct net_device *dev,
        return err;
 }
 
-static int raw_notifier(struct notifier_block *nb,
-                       unsigned long msg, void *ptr)
+static void raw_notify(struct raw_sock *ro, unsigned long msg,
+                      struct net_device *dev)
 {
-       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-       struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
        struct sock *sk = &ro->sk;
 
        if (!net_eq(dev_net(dev), sock_net(sk)))
-               return NOTIFY_DONE;
-
-       if (dev->type != ARPHRD_CAN)
-               return NOTIFY_DONE;
+               return;
 
        if (ro->ifindex != dev->ifindex)
-               return NOTIFY_DONE;
+               return;
 
        switch (msg) {
        case NETDEV_UNREGISTER:
@@ -305,7 +304,28 @@ static int raw_notifier(struct notifier_block *nb,
                        sk->sk_error_report(sk);
                break;
        }
+}
+
+static int raw_notifier(struct notifier_block *nb, unsigned long msg,
+                       void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+       if (dev->type != ARPHRD_CAN)
+               return NOTIFY_DONE;
+       if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
+               return NOTIFY_DONE;
+       if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
+               return NOTIFY_DONE;
 
+       spin_lock(&raw_notifier_lock);
+       list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
+               spin_unlock(&raw_notifier_lock);
+               raw_notify(raw_busy_notifier, msg, dev);
+               spin_lock(&raw_notifier_lock);
+       }
+       raw_busy_notifier = NULL;
+       spin_unlock(&raw_notifier_lock);
        return NOTIFY_DONE;
 }
 
@@ -334,9 +354,9 @@ static int raw_init(struct sock *sk)
                return -ENOMEM;
 
        /* set notifier */
-       ro->notifier.notifier_call = raw_notifier;
-
-       register_netdevice_notifier(&ro->notifier);
+       spin_lock(&raw_notifier_lock);
+       list_add_tail(&ro->notifier, &raw_notifier_list);
+       spin_unlock(&raw_notifier_lock);
 
        return 0;
 }
@@ -351,7 +371,14 @@ static int raw_release(struct socket *sock)
 
        ro = raw_sk(sk);
 
-       unregister_netdevice_notifier(&ro->notifier);
+       spin_lock(&raw_notifier_lock);
+       while (raw_busy_notifier == ro) {
+               spin_unlock(&raw_notifier_lock);
+               schedule_timeout_uninterruptible(1);
+               spin_lock(&raw_notifier_lock);
+       }
+       list_del(&ro->notifier);
+       spin_unlock(&raw_notifier_lock);
 
        lock_sock(sk);
 
@@ -889,6 +916,10 @@ static const struct can_proto raw_can_proto = {
        .prot       = &raw_proto,
 };
 
+static struct notifier_block canraw_notifier = {
+       .notifier_call = raw_notifier
+};
+
 static __init int raw_module_init(void)
 {
        int err;
@@ -898,6 +929,8 @@ static __init int raw_module_init(void)
        err = can_proto_register(&raw_can_proto);
        if (err < 0)
                pr_err("can: registration of raw protocol failed\n");
+       else
+               register_netdevice_notifier(&canraw_notifier);
 
        return err;
 }
@@ -905,6 +938,7 @@ static __init int raw_module_init(void)
 static __exit void raw_module_exit(void)
 {
        can_proto_unregister(&raw_can_proto);
+       unregister_netdevice_notifier(&canraw_notifier);
 }
 
 module_init(raw_module_init);
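
The net effect of the raw.c changes: instead of one netdevice notifier
per socket, the module registers a single canraw_notifier at init time
and keeps all sockets on a spinlock-protected list. raw_busy_notifier
doubles as the iteration cursor and as a "callback in flight" marker,
which lets raw_notifier() drop the lock while raw_notify() runs and
lets raw_release() wait out an in-flight callback before unlinking the
socket. The generic shape of the pattern, sketched with placeholder
names (lock, head, node, busy, entry, notify):

/* Walk a spinlock-protected list, calling back unlocked; the global
 * cursor is the busy marker that removal paths poll. */
spin_lock(&lock);
list_for_each_entry(busy, &head, node) {
        spin_unlock(&lock);
        notify(busy);                   /* runs without the lock */
        spin_lock(&lock);
}
busy = NULL;
spin_unlock(&lock);

/* Removal side: wait until no callback references the entry. */
spin_lock(&lock);
while (busy == entry) {
        spin_unlock(&lock);
        schedule_timeout_uninterruptible(1);
        spin_lock(&lock);
}
list_del(&entry->node);
spin_unlock(&lock);
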
index 50531a2..991d09b 100644
@@ -3852,10 +3852,33 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
        qdisc_calculate_pkt_len(skb, q);
 
        if (q->flags & TCQ_F_NOLOCK) {
+               if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
+                   qdisc_run_begin(q)) {
+                       /* Retest nolock_qdisc_is_empty() within the protection
+                        * of q->seqlock to protect from racing with requeuing.
+                        */
+                       if (unlikely(!nolock_qdisc_is_empty(q))) {
+                               rc = q->enqueue(skb, q, &to_free) &
+                                       NET_XMIT_MASK;
+                               __qdisc_run(q);
+                               qdisc_run_end(q);
+
+                               goto no_lock_out;
+                       }
+
+                       qdisc_bstats_cpu_update(q, skb);
+                       if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
+                           !nolock_qdisc_is_empty(q))
+                               __qdisc_run(q);
+
+                       qdisc_run_end(q);
+                       return NET_XMIT_SUCCESS;
+               }
+
                rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
-               if (likely(!netif_xmit_frozen_or_stopped(txq)))
-                       qdisc_run(q);
+               qdisc_run(q);
 
+no_lock_out:
                if (unlikely(to_free))
                        kfree_skb_list(to_free);
                return rc;
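
For lockless qdiscs the fast path above now tries to bypass queueing
entirely: when TCQ_F_CAN_BYPASS is set, the qdisc looks empty, and
qdisc_run_begin() wins q->seqlock, the skb goes straight to the driver
through sch_direct_xmit(). Emptiness is deliberately re-tested after
taking the seqlock, since a concurrent requeue may have slipped in
between the first nolock_qdisc_is_empty() check and the lock, in which
case the code falls back to the ordinary enqueue path. Condensed, with
empty() standing in for nolock_qdisc_is_empty():

if (q->flags & TCQ_F_CAN_BYPASS && empty(q) && qdisc_run_begin(q)) {
        if (unlikely(!empty(q))) {      /* lost the race: enqueue */
                rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
                __qdisc_run(q);
        } else {
                sch_direct_xmit(skb, q, dev, txq, NULL, true);
                if (!empty(q))
                        __qdisc_run(q); /* flush late arrivals */
                rc = NET_XMIT_SUCCESS;
        }
        qdisc_run_end(q);
}
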
index 566ddd1..8fdd04f 100644
@@ -2709,23 +2709,16 @@ static int devlink_rate_nodes_check(struct devlink *devlink, u16 mode,
                                    struct netlink_ext_ack *extack)
 {
        struct devlink_rate *devlink_rate;
-       u16 old_mode;
-       int err;
-
-       if (!devlink->ops->eswitch_mode_get)
-               return -EOPNOTSUPP;
-       err = devlink->ops->eswitch_mode_get(devlink, &old_mode);
-       if (err)
-               return err;
-
-       if (old_mode == mode)
-               return 0;
 
+       /* Take the lock to sync with devlink_rate_nodes_destroy() */
+       mutex_lock(&devlink->lock);
        list_for_each_entry(devlink_rate, &devlink->rate_list, list)
                if (devlink_rate_is_node(devlink_rate)) {
+                       mutex_unlock(&devlink->lock);
                        NL_SET_ERR_MSG_MOD(extack, "Rate node(s) exists.");
                        return -EBUSY;
                }
+       mutex_unlock(&devlink->lock);
        return 0;
 }
 
@@ -9275,6 +9268,8 @@ void devlink_rate_leaf_destroy(struct devlink_port *devlink_port)
 
        mutex_lock(&devlink->lock);
        devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_DEL);
+       if (devlink_rate->parent)
+               refcount_dec(&devlink_rate->parent->refcnt);
        list_del(&devlink_rate->list);
        devlink_port->devlink_rate = NULL;
        mutex_unlock(&devlink->lock);
index 2b2f333..53e85c7 100644
@@ -238,6 +238,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
 
                        write_lock(&n->lock);
                        if ((n->nud_state == NUD_FAILED) ||
+                           (n->nud_state == NUD_NOARP) ||
                            (tbl->is_multicast &&
                             tbl->is_multicast(n->primary_key)) ||
                            time_after(tref, n->updated))
index 43b6ac4..9b5a767 100644
@@ -641,6 +641,18 @@ void __put_net(struct net *net)
 }
 EXPORT_SYMBOL_GPL(__put_net);
 
+/**
+ * get_net_ns - increment the refcount of the network namespace
+ * @ns: common namespace (net)
+ *
+ * Returns the net's common namespace.
+ */
+struct ns_common *get_net_ns(struct ns_common *ns)
+{
+       return &get_net(container_of(ns, struct net, ns))->ns;
+}
+EXPORT_SYMBOL_GPL(get_net_ns);
+
 struct net *get_net_ns_by_fd(int fd)
 {
        struct file *file;
@@ -660,14 +672,8 @@ struct net *get_net_ns_by_fd(int fd)
        fput(file);
        return net;
 }
-
-#else
-struct net *get_net_ns_by_fd(int fd)
-{
-       return ERR_PTR(-EINVAL);
-}
-#endif
 EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
+#endif
 
 struct net *get_net_ns_by_pid(pid_t pid)
 {
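
get_net_ns() gives callers a generic way to take a reference on a
network namespace through its embedded ns_common, and
get_net_ns_by_fd() now lives entirely inside the CONFIG_NET_NS block
(the !CONFIG_NET_NS stub removed here presumably becomes a header
inline, outside this excerpt). A hedged sketch of a typical caller:

/* Hypothetical caller: take and later drop a netns reference. */
struct net *net = sock_net(sk);
struct ns_common *ns = get_net_ns(&net->ns);    /* refcount +1 */

/* ... use the namespace ... */

put_net(container_of(ns, struct net, ns));      /* refcount -1 */
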
index 5baa86b..745965e 100644
@@ -4850,10 +4850,12 @@ static int rtnl_bridge_notify(struct net_device *dev)
        if (err < 0)
                goto errout;
 
-       if (!skb->len) {
-               err = -EINVAL;
+       /* Notification info is only filled for bridge ports, not the bridge
+        * device itself. Therefore, a zero notification length is valid and
+        * should not result in an error.
+        */
+       if (!skb->len)
                goto errout;
-       }
 
        rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
        return 0;
index a0b1d48..2531ac4 100644
@@ -1258,6 +1258,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
        struct sock *sk = skb->sk;
        struct sk_buff_head *q;
        unsigned long flags;
+       bool is_zerocopy;
        u32 lo, hi;
        u16 len;
 
@@ -1272,6 +1273,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
        len = uarg->len;
        lo = uarg->id;
        hi = uarg->id + len - 1;
+       is_zerocopy = uarg->zerocopy;
 
        serr = SKB_EXT_ERR(skb);
        memset(serr, 0, sizeof(*serr));
@@ -1279,7 +1281,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
        serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
        serr->ee.ee_data = hi;
        serr->ee.ee_info = lo;
-       if (!uarg->zerocopy)
+       if (!is_zerocopy)
                serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
 
        q = &sk->sk_error_queue;
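
The is_zerocopy local above is a use-after-clobber fix: the
notification's ubuf_info and the sock_exterr_skb occupy the cb[] area
of the same skb (that aliasing is the assumption this fix rests on),
so the memset() initializing serr wipes uarg before the old code got
around to reading uarg->zerocopy. Sketched:

struct ubuf_info *uarg = (void *)skb->cb;          /* set at alloc */
struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);   /* also skb->cb */

bool is_zerocopy = uarg->zerocopy;   /* must be read before ... */
memset(serr, 0, sizeof(*serr));      /* ... this clobbers uarg */
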
index ddfa880..a2337b3 100644
@@ -1635,6 +1635,13 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                v.val = sk->sk_bound_dev_if;
                break;
 
+       case SO_NETNS_COOKIE:
+               lv = sizeof(u64);
+               if (len != lv)
+                       return -EINVAL;
+               v.val64 = sock_net(sk)->net_cookie;
+               break;
+
        default:
                /* We implement the SO_SNDLOWAT etc to not be settable
                 * (1003.1g 7).
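
The new SO_NETNS_COOKIE case returns the per-namespace cookie and,
unlike most socket options, insists on an optlen of exactly
sizeof(u64). A minimal userspace probe; the fallback option value
below matches asm-generic/socket.h at the time of this series and is
an assumption for architectures with their own numbering:

#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>

#ifndef SO_NETNS_COOKIE
#define SO_NETNS_COOKIE 71
#endif

int main(void)
{
        uint64_t cookie;
        socklen_t len = sizeof(cookie);         /* must be exactly 8 */
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0 || getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE,
                                 &cookie, &len) < 0) {
                perror("SO_NETNS_COOKIE");
                return 1;
        }
        printf("netns cookie: %llu\n", (unsigned long long)cookie);
        return 0;
}
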
index de5ee3a..3f00a28 100644
@@ -6,6 +6,7 @@
  * selecting the socket index from the array of available sockets.
  */
 
+#include <net/ip.h>
 #include <net/sock_reuseport.h>
 #include <linux/bpf.h>
 #include <linux/idr.h>
@@ -536,7 +537,7 @@ struct sock *reuseport_migrate_sock(struct sock *sk,
 
        socks = READ_ONCE(reuse->num_socks);
        if (unlikely(!socks))
-               goto out;
+               goto failure;
 
        /* paired with smp_wmb() in __reuseport_add_sock() */
        smp_rmb();
@@ -546,13 +547,13 @@ struct sock *reuseport_migrate_sock(struct sock *sk,
        if (!prog || prog->expected_attach_type != BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) {
                if (sock_net(sk)->ipv4.sysctl_tcp_migrate_req)
                        goto select_by_hash;
-               goto out;
+               goto failure;
        }
 
        if (!skb) {
                skb = alloc_skb(0, GFP_ATOMIC);
                if (!skb)
-                       goto out;
+                       goto failure;
                allocated = true;
        }
 
@@ -565,12 +566,18 @@ select_by_hash:
        if (!nsk)
                nsk = reuseport_select_sock_by_hash(reuse, hash, socks);
 
-       if (IS_ERR_OR_NULL(nsk) || unlikely(!refcount_inc_not_zero(&nsk->sk_refcnt)))
+       if (IS_ERR_OR_NULL(nsk) || unlikely(!refcount_inc_not_zero(&nsk->sk_refcnt))) {
                nsk = NULL;
+               goto failure;
+       }
 
 out:
        rcu_read_unlock();
        return nsk;
+
+failure:
+       __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
+       goto out;
 }
 EXPORT_SYMBOL(reuseport_migrate_sock);
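
The new failure label funnels every bail-out in
reuseport_migrate_sock() through a single
LINUX_MIB_TCPMIGRATEREQFAILURE bump before rejoining the common unlock
path. Together with the snmp4_net_list hunk further down, the counters
surface in /proc/net/netstat; a small, hypothetical userspace check:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[4096];
        FILE *f = fopen("/proc/net/netstat", "r");

        if (!f)
                return 1;
        /* Print the TcpExt lines; the name line now includes
         * TCPMigrateReqSuccess and TCPMigrateReqFailure. */
        while (fgets(line, sizeof(line), f))
                if (strncmp(line, "TcpExt:", 7) == 0)
                        fputs(line, stdout);
        fclose(f);
        return 0;
}
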
 
index b71e879..9000a8c 100644
@@ -219,21 +219,6 @@ static void dsa_tree_put(struct dsa_switch_tree *dst)
                kref_put(&dst->refcount, dsa_tree_release);
 }
 
-static bool dsa_port_is_dsa(struct dsa_port *port)
-{
-       return port->type == DSA_PORT_TYPE_DSA;
-}
-
-static bool dsa_port_is_cpu(struct dsa_port *port)
-{
-       return port->type == DSA_PORT_TYPE_CPU;
-}
-
-static bool dsa_port_is_user(struct dsa_port *dp)
-{
-       return dp->type == DSA_PORT_TYPE_USER;
-}
-
 static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
                                                   struct device_node *dn)
 {
@@ -1259,6 +1244,13 @@ static int dsa_switch_parse_member_of(struct dsa_switch *ds,
        if (!ds->dst)
                return -ENOMEM;
 
+       if (dsa_switch_find(ds->dst->index, ds->index)) {
+               dev_err(ds->dev,
+                       "A DSA switch with index %d already exists in tree %d\n",
+                       ds->index, ds->dst->index);
+               return -EEXIST;
+       }
+
        return 0;
 }
 
index b8b1747..c871294 100644
@@ -84,7 +84,7 @@ struct dsa_notifier_vlan_info {
 
 /* DSA_NOTIFIER_MTU */
 struct dsa_notifier_mtu_info {
-       bool propagate_upstream;
+       bool targeted_match;
        int sw_index;
        int port;
        int mtu;
@@ -188,19 +188,23 @@ void dsa_port_disable_rt(struct dsa_port *dp);
 void dsa_port_disable(struct dsa_port *dp);
 int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
                         struct netlink_ext_ack *extack);
+int dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br,
+                             struct netlink_ext_ack *extack);
 void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
 int dsa_port_lag_change(struct dsa_port *dp,
                        struct netdev_lag_lower_state_info *linfo);
 int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
                      struct netdev_lag_upper_info *uinfo,
                      struct netlink_ext_ack *extack);
+int dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev,
+                          struct netlink_ext_ack *extack);
 void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
 int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
                            struct netlink_ext_ack *extack);
 bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
 int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock);
 int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
-                       bool propagate_upstream);
+                       bool targeted_match);
 int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
                     u16 vid);
 int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
index 6379d66..46089dd 100644
@@ -194,26 +194,51 @@ static int dsa_port_switchdev_sync(struct dsa_port *dp,
        if (err && err != -EOPNOTSUPP)
                return err;
 
-       err = br_mdb_replay(br, brport_dev,
-                           &dsa_slave_switchdev_blocking_notifier,
-                           extack);
+       err = br_mdb_replay(br, brport_dev, dp, true,
+                           &dsa_slave_switchdev_blocking_notifier, extack);
        if (err && err != -EOPNOTSUPP)
                return err;
 
-       err = br_fdb_replay(br, brport_dev, &dsa_slave_switchdev_notifier);
+       err = br_fdb_replay(br, brport_dev, dp, true,
+                           &dsa_slave_switchdev_notifier);
        if (err && err != -EOPNOTSUPP)
                return err;
 
-       err = br_vlan_replay(br, brport_dev,
-                            &dsa_slave_switchdev_blocking_notifier,
-                            extack);
+       err = br_vlan_replay(br, brport_dev, dp, true,
+                            &dsa_slave_switchdev_blocking_notifier, extack);
        if (err && err != -EOPNOTSUPP)
                return err;
 
        return 0;
 }
 
-static void dsa_port_switchdev_unsync(struct dsa_port *dp)
+static int dsa_port_switchdev_unsync_objs(struct dsa_port *dp,
+                                         struct net_device *br,
+                                         struct netlink_ext_ack *extack)
+{
+       struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
+       int err;
+
+       /* Delete the switchdev objects left on this port */
+       err = br_mdb_replay(br, brport_dev, dp, false,
+                           &dsa_slave_switchdev_blocking_notifier, extack);
+       if (err && err != -EOPNOTSUPP)
+               return err;
+
+       err = br_fdb_replay(br, brport_dev, dp, false,
+                           &dsa_slave_switchdev_notifier);
+       if (err && err != -EOPNOTSUPP)
+               return err;
+
+       err = br_vlan_replay(br, brport_dev, dp, false,
+                            &dsa_slave_switchdev_blocking_notifier, extack);
+       if (err && err != -EOPNOTSUPP)
+               return err;
+
+       return 0;
+}
+
+static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
 {
        /* Configure the port for standalone mode (no address learning,
         * flood everything).
@@ -279,6 +304,12 @@ out_rollback:
        return err;
 }
 
+int dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br,
+                             struct netlink_ext_ack *extack)
+{
+       return dsa_port_switchdev_unsync_objs(dp, br, extack);
+}
+
 void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
 {
        struct dsa_notifier_bridge_info info = {
@@ -298,7 +329,7 @@ void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
        if (err)
                pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");
 
-       dsa_port_switchdev_unsync(dp);
+       dsa_port_switchdev_unsync_attrs(dp);
 }
 
 int dsa_port_lag_change(struct dsa_port *dp,
@@ -366,6 +397,15 @@ err_lag_join:
        return err;
 }
 
+int dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag,
+                          struct netlink_ext_ack *extack)
+{
+       if (dp->bridge_dev)
+               return dsa_port_pre_bridge_leave(dp, dp->bridge_dev, extack);
+
+       return 0;
+}
+
 void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
 {
        struct dsa_notifier_lag_info info = {
@@ -567,11 +607,11 @@ int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
 }
 
 int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
-                       bool propagate_upstream)
+                       bool targeted_match)
 {
        struct dsa_notifier_mtu_info info = {
                .sw_index = dp->ds->index,
-               .propagate_upstream = propagate_upstream,
+               .targeted_match = targeted_match,
                .port = dp->index,
                .mtu = new_mtu,
        };
index 798944a..898ed9c 100644
@@ -271,13 +271,16 @@ static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
 }
 
-static int dsa_slave_port_attr_set(struct net_device *dev,
+static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
                                   const struct switchdev_attr *attr,
                                   struct netlink_ext_ack *extack)
 {
        struct dsa_port *dp = dsa_slave_to_port(dev);
        int ret;
 
+       if (ctx && ctx != dp)
+               return 0;
+
        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
                if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
@@ -394,13 +397,16 @@ static int dsa_slave_vlan_add(struct net_device *dev,
        return vlan_vid_add(master, htons(ETH_P_8021Q), vlan.vid);
 }
 
-static int dsa_slave_port_obj_add(struct net_device *dev,
+static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
                                  const struct switchdev_obj *obj,
                                  struct netlink_ext_ack *extack)
 {
        struct dsa_port *dp = dsa_slave_to_port(dev);
        int err;
 
+       if (ctx && ctx != dp)
+               return 0;
+
        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_MDB:
                if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
@@ -469,12 +475,15 @@ static int dsa_slave_vlan_del(struct net_device *dev,
        return 0;
 }
 
-static int dsa_slave_port_obj_del(struct net_device *dev,
+static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
                                  const struct switchdev_obj *obj)
 {
        struct dsa_port *dp = dsa_slave_to_port(dev);
        int err;
 
+       if (ctx && ctx != dp)
+               return 0;
+
        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_MDB:
                if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
@@ -1528,6 +1537,7 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct dsa_switch *ds = p->dp->ds;
+       struct dsa_port *dp_iter;
        struct dsa_port *cpu_dp;
        int port = p->dp->index;
        int largest_mtu = 0;
@@ -1535,31 +1545,31 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
        int old_master_mtu;
        int mtu_limit;
        int cpu_mtu;
-       int err, i;
+       int err;
 
        if (!ds->ops->port_change_mtu)
                return -EOPNOTSUPP;
 
-       for (i = 0; i < ds->num_ports; i++) {
+       list_for_each_entry(dp_iter, &ds->dst->ports, list) {
                int slave_mtu;
 
-               if (!dsa_is_user_port(ds, i))
+               if (!dsa_port_is_user(dp_iter))
                        continue;
 
                /* During probe, this function will be called for each slave
                 * device, while not all of them have been allocated. That's
                 * ok, it doesn't change what the maximum is, so ignore it.
                 */
-               if (!dsa_to_port(ds, i)->slave)
+               if (!dp_iter->slave)
                        continue;
 
                /* Pretend that we already applied the setting, which we
                 * actually haven't (still haven't done all integrity checks)
                 */
-               if (i == port)
+               if (dp_iter == dp)
                        slave_mtu = new_mtu;
                else
-                       slave_mtu = dsa_to_port(ds, i)->slave->mtu;
+                       slave_mtu = dp_iter->slave->mtu;
 
                if (largest_mtu < slave_mtu)
                        largest_mtu = slave_mtu;
@@ -1585,14 +1595,15 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
                        goto out_master_failed;
 
                /* We only need to propagate the MTU of the CPU port to
-                * upstream switches.
+                * upstream switches, so create a non-targeted notifier which
+                * updates all switches.
                 */
-               err = dsa_port_mtu_change(cpu_dp, cpu_mtu, true);
+               err = dsa_port_mtu_change(cpu_dp, cpu_mtu, false);
                if (err)
                        goto out_cpu_failed;
        }
 
-       err = dsa_port_mtu_change(dp, new_mtu, false);
+       err = dsa_port_mtu_change(dp, new_mtu, true);
        if (err)
                goto out_port_failed;
 
@@ -1606,7 +1617,7 @@ out_port_failed:
        if (new_master_mtu != old_master_mtu)
                dsa_port_mtu_change(cpu_dp, old_master_mtu -
                                    dsa_tag_protocol_overhead(cpu_dp->tag_ops),
-                                   true);
+                                   false);
 out_cpu_failed:
        if (new_master_mtu != old_master_mtu)
                dev_set_mtu(master, old_master_mtu);
@@ -2066,6 +2077,26 @@ static int dsa_slave_changeupper(struct net_device *dev,
        return err;
 }
 
+static int dsa_slave_prechangeupper(struct net_device *dev,
+                                   struct netdev_notifier_changeupper_info *info)
+{
+       struct dsa_port *dp = dsa_slave_to_port(dev);
+       struct netlink_ext_ack *extack;
+       int err = 0;
+
+       extack = netdev_notifier_info_to_extack(&info->info);
+
+       if (netif_is_bridge_master(info->upper_dev) && !info->linking)
+               err = dsa_port_pre_bridge_leave(dp, info->upper_dev, extack);
+       else if (netif_is_lag_master(info->upper_dev) && !info->linking)
+               err = dsa_port_pre_lag_leave(dp, info->upper_dev, extack);
+       /* dsa_port_pre_hsr_leave is not yet necessary since hsr cannot be
+        * meaningfully enslaved to a bridge yet
+        */
+
+       return notifier_from_errno(err);
+}
+
 static int
 dsa_slave_lag_changeupper(struct net_device *dev,
                          struct netdev_notifier_changeupper_info *info)
@@ -2092,6 +2123,35 @@ dsa_slave_lag_changeupper(struct net_device *dev,
        return err;
 }
 
+/* Same as dsa_slave_lag_changeupper() except that it calls
+ * dsa_slave_prechangeupper()
+ */
+static int
+dsa_slave_lag_prechangeupper(struct net_device *dev,
+                            struct netdev_notifier_changeupper_info *info)
+{
+       struct net_device *lower;
+       struct list_head *iter;
+       int err = NOTIFY_DONE;
+       struct dsa_port *dp;
+
+       netdev_for_each_lower_dev(dev, lower, iter) {
+               if (!dsa_slave_dev_check(lower))
+                       continue;
+
+               dp = dsa_slave_to_port(lower);
+               if (!dp->lag_dev)
+                       /* Software LAG */
+                       continue;
+
+               err = dsa_slave_prechangeupper(lower, info);
+               if (notifier_to_errno(err))
+                       break;
+       }
+
+       return err;
+}
+
 static int
 dsa_prevent_bridging_8021q_upper(struct net_device *dev,
                                 struct netdev_notifier_changeupper_info *info)
@@ -2155,6 +2215,32 @@ dsa_slave_check_8021q_upper(struct net_device *dev,
        return NOTIFY_DONE;
 }
 
+static int
+dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
+                                     struct netdev_notifier_changeupper_info *info)
+{
+       struct dsa_switch *ds;
+       struct dsa_port *dp;
+       int err;
+
+       if (!dsa_slave_dev_check(dev))
+               return dsa_prevent_bridging_8021q_upper(dev, info);
+
+       dp = dsa_slave_to_port(dev);
+       ds = dp->ds;
+
+       if (ds->ops->port_prechangeupper) {
+               err = ds->ops->port_prechangeupper(ds, dp->index, info);
+               if (err)
+                       return notifier_from_errno(err);
+       }
+
+       if (is_vlan_dev(info->upper_dev))
+               return dsa_slave_check_8021q_upper(dev, info);
+
+       return NOTIFY_DONE;
+}
+
 static int dsa_slave_netdevice_event(struct notifier_block *nb,
                                     unsigned long event, void *ptr)
 {
@@ -2163,24 +2249,18 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
        switch (event) {
        case NETDEV_PRECHANGEUPPER: {
                struct netdev_notifier_changeupper_info *info = ptr;
-               struct dsa_switch *ds;
-               struct dsa_port *dp;
                int err;
 
-               if (!dsa_slave_dev_check(dev))
-                       return dsa_prevent_bridging_8021q_upper(dev, ptr);
+               err = dsa_slave_prechangeupper_sanity_check(dev, info);
+               if (err != NOTIFY_DONE)
+                       return err;
 
-               dp = dsa_slave_to_port(dev);
-               ds = dp->ds;
+               if (dsa_slave_dev_check(dev))
+                       return dsa_slave_prechangeupper(dev, ptr);
 
-               if (ds->ops->port_prechangeupper) {
-                       err = ds->ops->port_prechangeupper(ds, dp->index, info);
-                       if (err)
-                               return notifier_from_errno(err);
-               }
+               if (netif_is_lag_master(dev))
+                       return dsa_slave_lag_prechangeupper(dev, ptr);
 
-               if (is_vlan_dev(info->upper_dev))
-                       return dsa_slave_check_8021q_upper(dev, ptr);
                break;
        }
        case NETDEV_CHANGEUPPER:
index 9bf8e20..c1e5afa 100644
@@ -52,10 +52,13 @@ static int dsa_switch_ageing_time(struct dsa_switch *ds,
 static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port,
                                 struct dsa_notifier_mtu_info *info)
 {
-       if (ds->index == info->sw_index)
-               return (port == info->port) || dsa_is_dsa_port(ds, port);
+       if (ds->index == info->sw_index && port == info->port)
+               return true;
 
-       if (!info->propagate_upstream)
+       /* Do not propagate to other switches in the tree if the notifier was
+        * targeted for a single switch.
+        */
+       if (info->targeted_match)
                return false;
 
        if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
@@ -232,36 +235,15 @@ static int dsa_switch_lag_leave(struct dsa_switch *ds,
        return 0;
 }
 
-static bool dsa_switch_mdb_match(struct dsa_switch *ds, int port,
-                                struct dsa_notifier_mdb_info *info)
-{
-       if (ds->index == info->sw_index && port == info->port)
-               return true;
-
-       if (dsa_is_dsa_port(ds, port))
-               return true;
-
-       return false;
-}
-
 static int dsa_switch_mdb_add(struct dsa_switch *ds,
                              struct dsa_notifier_mdb_info *info)
 {
-       int err = 0;
-       int port;
+       int port = dsa_towards_port(ds, info->sw_index, info->port);
 
        if (!ds->ops->port_mdb_add)
                return -EOPNOTSUPP;
 
-       for (port = 0; port < ds->num_ports; port++) {
-               if (dsa_switch_mdb_match(ds, port, info)) {
-                       err = ds->ops->port_mdb_add(ds, port, info->mdb);
-                       if (err)
-                               break;
-               }
-       }
-
-       return err;
+       return ds->ops->port_mdb_add(ds, port, info->mdb);
 }
 
 static int dsa_switch_mdb_del(struct dsa_switch *ds,
@@ -364,36 +346,16 @@ static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
        return 0;
 }
 
-static bool dsa_switch_mrp_match(struct dsa_switch *ds, int port,
-                                struct dsa_notifier_mrp_info *info)
-{
-       if (ds->index == info->sw_index && port == info->port)
-               return true;
-
-       if (dsa_is_dsa_port(ds, port))
-               return true;
-
-       return false;
-}
-
 static int dsa_switch_mrp_add(struct dsa_switch *ds,
                              struct dsa_notifier_mrp_info *info)
 {
-       int err = 0;
-       int port;
-
        if (!ds->ops->port_mrp_add)
                return -EOPNOTSUPP;
 
-       for (port = 0; port < ds->num_ports; port++) {
-               if (dsa_switch_mrp_match(ds, port, info)) {
-                       err = ds->ops->port_mrp_add(ds, port, info->mrp);
-                       if (err)
-                               break;
-               }
-       }
+       if (ds->index == info->sw_index)
+               return ds->ops->port_mrp_add(ds, info->port, info->mrp);
 
-       return err;
+       return 0;
 }
 
 static int dsa_switch_mrp_del(struct dsa_switch *ds,
@@ -408,39 +370,18 @@ static int dsa_switch_mrp_del(struct dsa_switch *ds,
        return 0;
 }
 
-static bool
-dsa_switch_mrp_ring_role_match(struct dsa_switch *ds, int port,
-                              struct dsa_notifier_mrp_ring_role_info *info)
-{
-       if (ds->index == info->sw_index && port == info->port)
-               return true;
-
-       if (dsa_is_dsa_port(ds, port))
-               return true;
-
-       return false;
-}
-
 static int
 dsa_switch_mrp_add_ring_role(struct dsa_switch *ds,
                             struct dsa_notifier_mrp_ring_role_info *info)
 {
-       int err = 0;
-       int port;
-
        if (!ds->ops->port_mrp_add)
                return -EOPNOTSUPP;
 
-       for (port = 0; port < ds->num_ports; port++) {
-               if (dsa_switch_mrp_ring_role_match(ds, port, info)) {
-                       err = ds->ops->port_mrp_add_ring_role(ds, port,
-                                                             info->mrp);
-                       if (err)
-                               break;
-               }
-       }
+       if (ds->index == info->sw_index)
+               return ds->ops->port_mrp_add_ring_role(ds, info->port,
+                                                      info->mrp);
 
-       return err;
+       return 0;
 }
 
 static int
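
The switch.c hunks drop the per-port *_match() loops: MDB operations
now apply to exactly one port computed with dsa_towards_port(), and
the MRP handlers act only on the targeted switch. dsa_towards_port()
is not shown in this excerpt; the semantics assumed here are:

/* Assumed behaviour of dsa_towards_port(): on the target switch the
 * event applies to the port itself, on any other switch to the local
 * port that routes toward that switch. */
static inline int dsa_towards_port(struct dsa_switch *ds, int device,
                                   int port)
{
        if (device == ds->index)
                return port;
        return dsa_routing_port(ds, device);
}
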
index 2a6733a..7e6b37a 100644
@@ -95,7 +95,7 @@ static int get_module_eeprom_by_page(struct net_device *dev,
        if (dev->sfp_bus)
                return sfp_get_module_eeprom_by_page(dev->sfp_bus, page_data, extack);
 
-       if (ops->get_module_info)
+       if (ops->get_module_eeprom_by_page)
                return ops->get_module_eeprom_by_page(dev, page_data, extack);
 
        return -EOPNOTSUPP;
@@ -159,9 +159,6 @@ static int eeprom_parse_request(struct ethnl_req_info *req_info, struct nlattr *
        request->offset = nla_get_u32(tb[ETHTOOL_A_MODULE_EEPROM_OFFSET]);
        request->length = nla_get_u32(tb[ETHTOOL_A_MODULE_EEPROM_LENGTH]);
 
-       if (!request->length)
-               return -EINVAL;
-
        /* The following set of conditions limit the API to only dump 1/2
         * EEPROM page without crossing low page boundary located at offset 128.
         * This means user may only request dumps of length limited to 128 from
@@ -180,10 +177,6 @@ static int eeprom_parse_request(struct ethnl_req_info *req_info, struct nlattr *
                NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_MODULE_EEPROM_LENGTH],
                                    "reading cross half page boundary is illegal");
                return -EINVAL;
-       } else if (request->offset >= ETH_MODULE_EEPROM_PAGE_LEN * 2) {
-               NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_MODULE_EEPROM_OFFSET],
-                                   "offset is out of bounds");
-               return -EINVAL;
        } else if (request->offset + request->length > ETH_MODULE_EEPROM_PAGE_LEN * 2) {
                NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_MODULE_EEPROM_LENGTH],
                                    "reading cross page boundary is illegal");
@@ -236,8 +229,10 @@ const struct ethnl_request_ops ethnl_module_eeprom_request_ops = {
 
 const struct nla_policy ethnl_module_eeprom_get_policy[] = {
        [ETHTOOL_A_MODULE_EEPROM_HEADER]        = NLA_POLICY_NESTED(ethnl_header_policy),
-       [ETHTOOL_A_MODULE_EEPROM_OFFSET]        = { .type = NLA_U32 },
-       [ETHTOOL_A_MODULE_EEPROM_LENGTH]        = { .type = NLA_U32 },
+       [ETHTOOL_A_MODULE_EEPROM_OFFSET]        =
+               NLA_POLICY_MAX(NLA_U32, ETH_MODULE_EEPROM_PAGE_LEN * 2 - 1),
+       [ETHTOOL_A_MODULE_EEPROM_LENGTH]        =
+               NLA_POLICY_RANGE(NLA_U32, 1, ETH_MODULE_EEPROM_PAGE_LEN),
        [ETHTOOL_A_MODULE_EEPROM_PAGE]          = { .type = NLA_U8 },
        [ETHTOOL_A_MODULE_EEPROM_BANK]          = { .type = NLA_U8 },
        [ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS]   =
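
The -EINVAL checks removed from eeprom_parse_request() are not lost:
they move into the policy table above, where the netlink core rejects
out-of-range attributes before the handler ever runs. NLA_POLICY_MAX()
caps the offset below two EEPROM pages and NLA_POLICY_RANGE() forces a
non-zero length of at most one page. The same idiom with hypothetical
attribute names:

enum { EX_ATTR_UNSPEC, EX_ATTR_OFFSET, EX_ATTR_LENGTH, __EX_ATTR_MAX };

static const struct nla_policy ex_policy[__EX_ATTR_MAX] = {
        [EX_ATTR_OFFSET] = NLA_POLICY_MAX(NLA_U32, 255),      /* 0..255 */
        [EX_ATTR_LENGTH] = NLA_POLICY_RANGE(NLA_U32, 1, 128), /* 1..128 */
};
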
index 3fa7a39..baa5d10 100644
@@ -1421,7 +1421,7 @@ static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr,
        if (eeprom.offset + eeprom.len > total_len)
                return -EINVAL;
 
-       data = kmalloc(PAGE_SIZE, GFP_USER);
+       data = kzalloc(PAGE_SIZE, GFP_USER);
        if (!data)
                return -ENOMEM;
 
@@ -1486,7 +1486,7 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
        if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
                return -EINVAL;
 
-       data = kmalloc(PAGE_SIZE, GFP_USER);
+       data = kzalloc(PAGE_SIZE, GFP_USER);
        if (!data)
                return -ENOMEM;
 
@@ -1765,7 +1765,7 @@ static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
                return -EFAULT;
 
        test.len = test_len;
-       data = kmalloc_array(test_len, sizeof(u64), GFP_USER);
+       data = kcalloc(test_len, sizeof(u64), GFP_USER);
        if (!data)
                return -ENOMEM;
 
@@ -2293,7 +2293,7 @@ static int ethtool_get_tunable(struct net_device *dev, void __user *useraddr)
        ret = ethtool_tunable_valid(&tuna);
        if (ret)
                return ret;
-       data = kmalloc(tuna.len, GFP_USER);
+       data = kzalloc(tuna.len, GFP_USER);
        if (!data)
                return -ENOMEM;
        ret = ops->get_tunable(dev, &tuna, data);
@@ -2485,7 +2485,7 @@ static int get_phy_tunable(struct net_device *dev, void __user *useraddr)
        ret = ethtool_phy_tunable_valid(&tuna);
        if (ret)
                return ret;
-       data = kmalloc(tuna.len, GFP_USER);
+       data = kzalloc(tuna.len, GFP_USER);
        if (!data)
                return -ENOMEM;
        if (phy_drv_tunable) {
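
Each kmalloc()-to-kzalloc() switch above covers a buffer that is later
copied back to userspace; when a driver callback fills fewer bytes
than requested, the zeroed tail keeps stale heap contents from leaking.
Condensed from the get_tunable path:

data = kzalloc(tuna.len, GFP_USER);             /* zeroed: no infoleak */
if (!data)
        return -ENOMEM;
ret = ops->get_tunable(dev, &tuna, data);       /* may underfill */
if (!ret && copy_to_user(useraddr, data, tuna.len))
        ret = -EFAULT;
kfree(data);
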
index 90b1096..3e25a47 100644
@@ -380,7 +380,7 @@ extern const struct nla_policy ethnl_cable_test_tdr_act_policy[ETHTOOL_A_CABLE_T
 extern const struct nla_policy ethnl_tunnel_info_get_policy[ETHTOOL_A_TUNNEL_INFO_HEADER + 1];
 extern const struct nla_policy ethnl_fec_get_policy[ETHTOOL_A_FEC_HEADER + 1];
 extern const struct nla_policy ethnl_fec_set_policy[ETHTOOL_A_FEC_AUTO + 1];
-extern const struct nla_policy ethnl_module_eeprom_get_policy[ETHTOOL_A_MODULE_EEPROM_DATA + 1];
+extern const struct nla_policy ethnl_module_eeprom_get_policy[ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS + 1];
 extern const struct nla_policy ethnl_stats_get_policy[ETHTOOL_A_STATS_GROUPS + 1];
 
 int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info);
index b3029ff..2d51b7a 100644
@@ -353,6 +353,8 @@ static int strset_reply_size(const struct ethnl_req_info *req_base,
        int len = 0;
        int ret;
 
+       len += nla_total_size(0); /* ETHTOOL_A_STRSET_STRINGSETS */
+
        for (i = 0; i < ETH_SS_COUNT; i++) {
                const struct strset_info *set_info = &data->sets[i];
 
index 750f388..5464818 100644
@@ -575,7 +575,7 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
                        return err;
        }
 
-       if (!inet_sk(sk)->inet_num && inet_autobind(sk))
+       if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk))
                return -EAGAIN;
        return sk->sk_prot->connect(sk, uaddr, addr_len);
 }
@@ -803,7 +803,7 @@ int inet_send_prepare(struct sock *sk)
        sock_rps_record_flow(sk);
 
        /* We may need to bind the socket. */
-       if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
+       if (data_race(!inet_sk(sk)->inet_num) && !sk->sk_prot->no_autobind &&
            inet_autobind(sk))
                return -EAGAIN;
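
The data_race() annotations mark these lockless inet_num reads as
intentional for KCSAN: a stale value only causes a spurious call into
inet_autobind(), which re-checks under the socket lock. Paraphrased
(inet_autobind() itself is not part of this diff):

static int inet_autobind(struct sock *sk)
{
        struct inet_sock *inet = inet_sk(sk);

        lock_sock(sk);
        if (!inet->inet_num) {                  /* authoritative re-check */
                if (sk->sk_prot->get_port(sk, 0)) {
                        release_sock(sk);
                        return -EAGAIN;
                }
                inet->inet_sport = htons(inet->inet_num);
        }
        release_sock(sk);
        return 0;
}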
 
index 36ed85b..2d2d08a 100644
@@ -554,7 +554,6 @@ static int ah4_rcv_cb(struct sk_buff *skb, int err)
 
 static const struct xfrm_type ah_type =
 {
-       .description    = "AH4",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_AH,
        .flags          = XFRM_TYPE_REPLAY_PROT,
index d6e3a92..099259f 100644
@@ -471,6 +471,7 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
                kfree(doi_def->map.std->lvl.local);
                kfree(doi_def->map.std->cat.cipso);
                kfree(doi_def->map.std->cat.local);
+               kfree(doi_def->map.std);
                break;
        }
        kfree(doi_def);
index 50deeff..73721a4 100644
@@ -1989,7 +1989,7 @@ static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla,
                return -EAFNOSUPPORT;
 
        if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0)
-               BUG();
+               return -EINVAL;
 
        if (tb[IFLA_INET_CONF]) {
                nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
index 35803ab..f5362b9 100644
@@ -1198,7 +1198,6 @@ static int esp4_rcv_cb(struct sk_buff *skb, int err)
 
 static const struct xfrm_type esp_type =
 {
-       .description    = "ESP4",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .flags          = XFRM_TYPE_REPLAY_PROT,
index 33687cf..8e4e9aa 100644
@@ -33,12 +33,11 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
        struct xfrm_state *x;
        __be32 seq;
        __be32 spi;
-       int err;
 
        if (!pskb_pull(skb, offset))
                return NULL;
 
-       if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
+       if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
                goto out;
 
        xo = xfrm_offload(skb);
@@ -343,7 +342,6 @@ static const struct net_offload esp4_offload = {
 };
 
 static const struct xfrm_type_offload esp_type_offload = {
-       .description    = "ESP4 OFFLOAD",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .input_tail     = esp_input_tail,
index 2e09d62..c695d29 100644
@@ -759,6 +759,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
                icmp_param.data_len = room;
        icmp_param.head_len = sizeof(struct icmphdr);
 
+       /* if we don't have a source address at this point, fall back to the
+        * dummy address instead of sending out a packet with a source address
+        * of 0.0.0.0
+        */
+       if (!fl4.saddr)
+               fl4.saddr = htonl(INADDR_DUMMY);
+
        icmp_push_reply(&icmp_param, &fl4, &ipc, &rt);
 ende:
        ip_rt_put(rt);
@@ -986,14 +993,8 @@ static bool icmp_redirect(struct sk_buff *skb)
 
 static bool icmp_echo(struct sk_buff *skb)
 {
-       struct icmp_ext_hdr *ext_hdr, _ext_hdr;
-       struct icmp_ext_echo_iio *iio, _iio;
        struct icmp_bxm icmp_param;
-       struct net_device *dev;
-       char buff[IFNAMSIZ];
        struct net *net;
-       u16 ident_len;
-       u8 status;
 
        net = dev_net(skb_dst(skb)->dev);
        /* should there be an ICMP stat for ignored echos? */
@@ -1006,20 +1007,46 @@ static bool icmp_echo(struct sk_buff *skb)
        icmp_param.data_len        = skb->len;
        icmp_param.head_len        = sizeof(struct icmphdr);
 
-       if (icmp_param.data.icmph.type == ICMP_ECHO) {
+       if (icmp_param.data.icmph.type == ICMP_ECHO)
                icmp_param.data.icmph.type = ICMP_ECHOREPLY;
-               goto send_reply;
-       }
-       if (!net->ipv4.sysctl_icmp_echo_enable_probe)
+       else if (!icmp_build_probe(skb, &icmp_param.data.icmph))
                return true;
+
+       icmp_reply(&icmp_param, skb);
+       return true;
+}
+
+/*     Helper for icmp_echo and icmpv6_echo_reply.
+ *     Searches for net_device that matches PROBE interface identifier
+ *             and builds PROBE reply message in icmphdr.
+ *
+ *     Returns false if PROBE responses are disabled via sysctl
+ */
+
+bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
+{
+       struct icmp_ext_hdr *ext_hdr, _ext_hdr;
+       struct icmp_ext_echo_iio *iio, _iio;
+       struct net *net = dev_net(skb->dev);
+       struct net_device *dev;
+       char buff[IFNAMSIZ];
+       u16 ident_len;
+       u8 status;
+
+       if (!net->ipv4.sysctl_icmp_echo_enable_probe)
+               return false;
+
        /* We currently only support probing interfaces on the proxy node
         * Check to ensure L-bit is set
         */
-       if (!(ntohs(icmp_param.data.icmph.un.echo.sequence) & 1))
-               return true;
+       if (!(ntohs(icmphdr->un.echo.sequence) & 1))
+               return false;
        /* Clear status bits in reply message */
-       icmp_param.data.icmph.un.echo.sequence &= htons(0xFF00);
-       icmp_param.data.icmph.type = ICMP_EXT_ECHOREPLY;
+       icmphdr->un.echo.sequence &= htons(0xFF00);
+       if (icmphdr->type == ICMP_EXT_ECHO)
+               icmphdr->type = ICMP_EXT_ECHOREPLY;
+       else
+               icmphdr->type = ICMPV6_EXT_ECHO_REPLY;
        ext_hdr = skb_header_pointer(skb, 0, sizeof(_ext_hdr), &_ext_hdr);
        /* Size of iio is class_type dependent.
         * Only check header here and assign length based on ctype in the switch statement
@@ -1080,8 +1107,8 @@ static bool icmp_echo(struct sk_buff *skb)
                goto send_mal_query;
        }
        if (!dev) {
-               icmp_param.data.icmph.code = ICMP_EXT_CODE_NO_IF;
-               goto send_reply;
+               icmphdr->code = ICMP_EXT_CODE_NO_IF;
+               return true;
        }
        /* Fill bits in reply message */
        if (dev->flags & IFF_UP)
@@ -1091,14 +1118,13 @@ static bool icmp_echo(struct sk_buff *skb)
        if (!list_empty(&rcu_dereference(dev->ip6_ptr)->addr_list))
                status |= ICMP_EXT_ECHOREPLY_IPV6;
        dev_put(dev);
-       icmp_param.data.icmph.un.echo.sequence |= htons(status);
-send_reply:
-       icmp_reply(&icmp_param, skb);
-               return true;
+       icmphdr->un.echo.sequence |= htons(status);
+       return true;
 send_mal_query:
-       icmp_param.data.icmph.code = ICMP_EXT_CODE_MAL_QUERY;
-       goto send_reply;
+       icmphdr->code = ICMP_EXT_CODE_MAL_QUERY;
+       return true;
 }
+EXPORT_SYMBOL_GPL(icmp_build_probe);
 
 /*
  *     Handle ICMP Timestamp requests.
index 7b272bb..6b3c558 100644
@@ -1801,6 +1801,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
        while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
                in_dev->mc_list = i->next_rcu;
                in_dev->mc_count--;
+               ip_mc_clear_src(i);
                ip_ma_put(i);
        }
 }
index 0eea878..754013f 100644
@@ -703,6 +703,8 @@ static struct request_sock *inet_reqsk_clone(struct request_sock *req,
 
        nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
        if (!nreq) {
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
+
                /* paired with refcount_inc_not_zero() in reuseport_migrate_sock() */
                sock_put(sk);
                return NULL;
@@ -876,9 +878,10 @@ static void reqsk_timer_handler(struct timer_list *t)
                if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
                        /* delete timer */
                        inet_csk_reqsk_queue_drop(sk_listener, nreq);
-                       goto drop;
+                       goto no_ownership;
                }
 
+               __NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQSUCCESS);
                reqsk_migrate_reset(oreq);
                reqsk_queue_removed(&inet_csk(oreq->rsk_listener)->icsk_accept_queue, oreq);
                reqsk_put(oreq);
@@ -887,17 +890,19 @@ static void reqsk_timer_handler(struct timer_list *t)
                return;
        }
 
-drop:
        /* Even if we can clone the req, we may need not retransmit any more
         * SYN+ACKs (nreq->num_timeout > max_syn_ack_retries, etc), or another
         * CPU may win the "own_req" race so that inet_ehash_insert() fails.
         */
        if (nreq) {
+               __NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQFAILURE);
+no_ownership:
                reqsk_migrate_reset(nreq);
                reqsk_queue_removed(queue, nreq);
                __reqsk_free(nreq);
        }
 
+drop:
        inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
 }
 
@@ -1135,11 +1140,13 @@ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
 
                        refcount_set(&nreq->rsk_refcnt, 1);
                        if (inet_csk_reqsk_queue_add(sk, nreq, child)) {
+                               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQSUCCESS);
                                reqsk_migrate_reset(req);
                                reqsk_put(req);
                                return child;
                        }
 
+                       __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
                        reqsk_migrate_reset(nreq);
                        __reqsk_free(nreq);
                } else if (inet_csk_reqsk_queue_add(sk, req, child)) {
@@ -1188,8 +1195,12 @@ void inet_csk_listen_stop(struct sock *sk)
                                refcount_set(&nreq->rsk_refcnt, 1);
 
                                if (inet_csk_reqsk_queue_add(nsk, nreq, child)) {
+                                       __NET_INC_STATS(sock_net(nsk),
+                                                       LINUX_MIB_TCPMIGRATEREQSUCCESS);
                                        reqsk_migrate_reset(req);
                                } else {
+                                       __NET_INC_STATS(sock_net(nsk),
+                                                       LINUX_MIB_TCPMIGRATEREQFAILURE);
                                        reqsk_migrate_reset(nreq);
                                        __reqsk_free(nreq);
                                }
index a68bf4c..12dca0c 100644
@@ -107,6 +107,8 @@ module_param(log_ecn_error, bool, 0644);
 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 
 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
+static const struct header_ops ipgre_header_ops;
+
 static int ipgre_tunnel_init(struct net_device *dev);
 static void erspan_build_header(struct sk_buff *skb,
                                u32 id, u32 index,
@@ -364,7 +366,10 @@ static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
                                           raw_proto, false) < 0)
                        goto drop;
 
-               if (tunnel->dev->type != ARPHRD_NONE)
+               /* Special case for ipgre_header_parse(), which expects the
+                * mac_header to point to the outer IP header.
+                */
+               if (tunnel->dev->header_ops == &ipgre_header_ops)
                        skb_pop_mac_header(skb);
                else
                        skb_reset_mac_header(skb);
index c3efc7d..8d8a8da 100644
@@ -1054,7 +1054,7 @@ static int __ip_append_data(struct sock *sk,
                        unsigned int datalen;
                        unsigned int fraglen;
                        unsigned int fraggap;
-                       unsigned int alloclen;
+                       unsigned int alloclen, alloc_extra;
                        unsigned int pagedlen;
                        struct sk_buff *skb_prev;
 alloc_new_skb:
@@ -1074,35 +1074,39 @@ alloc_new_skb:
                        fraglen = datalen + fragheaderlen;
                        pagedlen = 0;
 
+                       alloc_extra = hh_len + 15;
+                       alloc_extra += exthdrlen;
+
+                       /* The last fragment gets additional space at tail.
+                        * Note, with MSG_MORE we overallocate on fragments,
+                        * because we have no idea what fragment will be
+                        * the last.
+                        */
+                       if (datalen == length + fraggap)
+                               alloc_extra += rt->dst.trailer_len;
+
                        if ((flags & MSG_MORE) &&
                            !(rt->dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
-                       else if (!paged)
+                       else if (!paged &&
+                                (fraglen + alloc_extra < SKB_MAX_ALLOC ||
+                                 !(rt->dst.dev->features & NETIF_F_SG)))
                                alloclen = fraglen;
                        else {
                                alloclen = min_t(int, fraglen, MAX_HEADER);
                                pagedlen = fraglen - alloclen;
                        }
 
-                       alloclen += exthdrlen;
-
-                       /* The last fragment gets additional space at tail.
-                        * Note, with MSG_MORE we overallocate on fragments,
-                        * because we have no idea what fragment will be
-                        * the last.
-                        */
-                       if (datalen == length + fraggap)
-                               alloclen += rt->dst.trailer_len;
+                       alloclen += alloc_extra;
 
                        if (transhdrlen) {
-                               skb = sock_alloc_send_skb(sk,
-                                               alloclen + hh_len + 15,
+                               skb = sock_alloc_send_skb(sk, alloclen,
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
                                if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
                                    2 * sk->sk_sndbuf)
-                                       skb = alloc_skb(alloclen + hh_len + 15,
+                                       skb = alloc_skb(alloclen,
                                                        sk->sk_allocation);
                                if (unlikely(!skb))
                                        err = -ENOBUFS;
index b426832..2e69e81 100644
@@ -152,7 +152,6 @@ static int ipcomp4_rcv_cb(struct sk_buff *skb, int err)
 }
 
 static const struct xfrm_type ipcomp_type = {
-       .description    = "IPCOMP4",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_COMP,
        .init_state     = ipcomp4_init_state,
index d5bfa08..266c655 100644
@@ -242,6 +242,8 @@ static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
                        if (!tun_dst)
                                return 0;
                }
+               skb_reset_mac_header(skb);
+
                return ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
        }
 
index 1c9f71a..95a7183 100644
@@ -954,6 +954,7 @@ bool ping_rcv(struct sk_buff *skb)
        struct sock *sk;
        struct net *net = dev_net(skb->dev);
        struct icmphdr *icmph = icmp_hdr(skb);
+       bool rc = false;
 
        /* We assume the packet has already been checked by icmp_rcv */
 
@@ -968,14 +969,15 @@ bool ping_rcv(struct sk_buff *skb)
                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 
                pr_debug("rcv on socket %p\n", sk);
-               if (skb2)
-                       ping_queue_rcv_skb(sk, skb2);
+               if (skb2 && !ping_queue_rcv_skb(sk, skb2))
+                       rc = true;
                sock_put(sk);
-               return true;
        }
-       pr_debug("no socket, dropping\n");
 
-       return false;
+       if (!rc)
+               pr_debug("no socket, dropping\n");
+
+       return rc;
 }
 EXPORT_SYMBOL_GPL(ping_rcv);
 
index 6d46297..b0d3a09 100644
@@ -295,6 +295,8 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TcpDuplicateDataRehash", LINUX_MIB_TCPDUPLICATEDATAREHASH),
        SNMP_MIB_ITEM("TCPDSACKRecvSegs", LINUX_MIB_TCPDSACKRECVSEGS),
        SNMP_MIB_ITEM("TCPDSACKIgnoredDubious", LINUX_MIB_TCPDSACKIGNOREDDUBIOUS),
+       SNMP_MIB_ITEM("TCPMigrateReqSuccess", LINUX_MIB_TCPMIGRATEREQSUCCESS),
+       SNMP_MIB_ITEM("TCPMigrateReqFailure", LINUX_MIB_TCPMIGRATEREQFAILURE),
        SNMP_MIB_SENTINEL
 };
 
index a4c4774..66aacb9 100644
@@ -2179,6 +2179,19 @@ martian_source:
        return err;
 }
 
+/* get device for dst_alloc with local routes */
+static struct net_device *ip_rt_get_dev(struct net *net,
+                                       const struct fib_result *res)
+{
+       struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
+       struct net_device *dev = NULL;
+
+       if (nhc)
+               dev = l3mdev_master_dev_rcu(nhc->nhc_dev);
+
+       return dev ? : net->loopback_dev;
+}
+
 /*
  *     NOTE. We drop all the packets that has local source
  *     addresses, because every properly looped back packet
@@ -2335,7 +2348,7 @@ local_input:
                }
        }
 
-       rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
+       rth = rt_dst_alloc(ip_rt_get_dev(net, res),
                           flags | RTCF_LOCAL, res->type,
                           IN_DEV_ORCONF(in_dev, NOPOLICY), false);
        if (!rth)
index f258a4c..0a4f3f1 100644
@@ -786,6 +786,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
        return inet_csk_complete_hashdance(sk, child, req, own_req);
 
 listen_overflow:
+       if (sk != req->rsk_listener)
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
+
        if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
                inet_rsk(req)->acked = 1;
                return NULL;
index 15f5504..1307ad0 100644
@@ -2607,6 +2607,9 @@ void udp_destroy_sock(struct sock *sk)
 {
        struct udp_sock *up = udp_sk(sk);
        bool slow = lock_sock_fast(sk);
+
+       /* protects from races with udp_abort() */
+       sock_set_flag(sk, SOCK_DEAD);
        udp_flush_pending_frames(sk);
        unlock_sock_fast(sk, slow);
        if (static_branch_unlikely(&udp_encap_needed_key)) {
@@ -2857,10 +2860,17 @@ int udp_abort(struct sock *sk, int err)
 {
        lock_sock(sk);
 
+       /* udp{v6}_destroy_sock() sets it under the sk lock, avoid racing
+        * with close()
+        */
+       if (sock_flag(sk, SOCK_DEAD))
+               goto out;
+
        sk->sk_err = err;
        sk->sk_error_report(sk);
        __udp_disconnect(sk, 0);
 
+out:
        release_sock(sk);
 
        return 0;
index fb0648e..f4555a8 100644
@@ -42,7 +42,6 @@ static void ipip_destroy(struct xfrm_state *x)
 }
 
 static const struct xfrm_type ipip_type = {
-       .description    = "IPIP",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_IPIP,
        .init_state     = ipip_init_state,
index 0485709..3bf685f 100644
@@ -5827,7 +5827,7 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
                return -EAFNOSUPPORT;
 
        if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
-               BUG();
+               return -EINVAL;
 
        if (tb[IFLA_INET6_TOKEN]) {
                err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]),
index 20d492d..828e625 100644
@@ -755,7 +755,6 @@ static int ah6_rcv_cb(struct sk_buff *skb, int err)
 }
 
 static const struct xfrm_type ah6_type = {
-       .description    = "AH6",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_AH,
        .flags          = XFRM_TYPE_REPLAY_PROT,
@@ -763,7 +762,6 @@ static const struct xfrm_type ah6_type = {
        .destructor     = ah6_destroy,
        .input          = ah6_input,
        .output         = ah6_output,
-       .hdr_offset     = xfrm6_find_1stfragopt,
 };
 
 static struct xfrm6_protocol ah6_protocol = {
index 393ae2b..37c4b17 100644
@@ -1243,7 +1243,6 @@ static int esp6_rcv_cb(struct sk_buff *skb, int err)
 }
 
 static const struct xfrm_type esp6_type = {
-       .description    = "ESP6",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .flags          = XFRM_TYPE_REPLAY_PROT,
@@ -1251,7 +1250,6 @@ static const struct xfrm_type esp6_type = {
        .destructor     = esp6_destroy,
        .input          = esp6_input,
        .output         = esp6_output,
-       .hdr_offset     = xfrm6_find_1stfragopt,
 };
 
 static struct xfrm6_protocol esp6_protocol = {
index 40ed4fc..a349d47 100644
@@ -377,7 +377,6 @@ static const struct net_offload esp6_offload = {
 };
 
 static const struct xfrm_type_offload esp6_type_offload = {
-       .description    = "ESP6 OFFLOAD",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .input_tail     = esp6_input_tail,
index e8398ff..a7c31ab 100644 (file)
@@ -725,6 +725,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        struct ipcm6_cookie ipc6;
        u32 mark = IP6_REPLY_MARK(net, skb->mark);
        bool acast;
+       u8 type;
 
        if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) &&
            net->ipv6.sysctl.icmpv6_echo_ignore_multicast)
@@ -740,8 +741,13 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
            !(net->ipv6.sysctl.anycast_src_echo_reply && acast))
                saddr = NULL;
 
+       if (icmph->icmp6_type == ICMPV6_EXT_ECHO_REQUEST)
+               type = ICMPV6_EXT_ECHO_REPLY;
+       else
+               type = ICMPV6_ECHO_REPLY;
+
        memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
-       tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;
+       tmp_hdr.icmp6_type = type;
 
        memset(&fl6, 0, sizeof(fl6));
        if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_ICMPV6_ECHO_REPLIES)
@@ -752,7 +758,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        if (saddr)
                fl6.saddr = *saddr;
        fl6.flowi6_oif = icmp6_iif(skb);
-       fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
+       fl6.fl6_icmp_type = type;
        fl6.flowi6_mark = mark;
        fl6.flowi6_uid = sock_net_uid(net, NULL);
        security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));
@@ -783,13 +789,17 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 
        msg.skb = skb;
        msg.offset = 0;
-       msg.type = ICMPV6_ECHO_REPLY;
+       msg.type = type;
 
        ipcm6_init_sk(&ipc6, np);
        ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
        ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb));
        ipc6.sockc.mark = mark;
 
+       if (icmph->icmp6_type == ICMPV6_EXT_ECHO_REQUEST)
+               if (!icmp_build_probe(skb, (struct icmphdr *)&tmp_hdr))
+                       goto out_dst_release;
+
        if (ip6_append_data(sk, icmpv6_getfrag, &msg,
                            skb->len + sizeof(struct icmp6hdr),
                            sizeof(struct icmp6hdr), &ipc6, &fl6,
@@ -911,6 +921,11 @@ static int icmpv6_rcv(struct sk_buff *skb)
                if (!net->ipv6.sysctl.icmpv6_echo_ignore_all)
                        icmpv6_echo_reply(skb);
                break;
+       case ICMPV6_EXT_ECHO_REQUEST:
+               if (!net->ipv6.sysctl.icmpv6_echo_ignore_all &&
+                   net->ipv4.sysctl_icmp_echo_enable_probe)
+                       icmpv6_echo_reply(skb);
+               break;
 
        case ICMPV6_ECHO_REPLY:
                success = ping_rcv(skb);
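
For context: ICMPV6_EXT_ECHO_REQUEST/ICMPV6_EXT_ECHO_REPLY are the RFC 8335 PROBE messages. The hunks above make icmpv6_echo_reply() mirror the request type, and gate the new receive case on both icmpv6_echo_ignore_all being off and the shared net.ipv4.icmp_echo_enable_probe sysctl being on. A small sketch of the reply-type selection, assuming a 5.13+ UAPI header for the constants:

    #include <linux/icmpv6.h>   /* ICMPV6_ECHO_REQUEST, ICMPV6_EXT_ECHO_REQUEST, ... */
    #include <stdio.h>

    /* mirror of the selection added to icmpv6_echo_reply(): classic echo
     * requests get ICMPV6_ECHO_REPLY, RFC 8335 probes the extended reply */
    static unsigned char reply_type(unsigned char req_type)
    {
        return req_type == ICMPV6_EXT_ECHO_REQUEST ? ICMPV6_EXT_ECHO_REPLY
                                                   : ICMPV6_ECHO_REPLY;
    }

    int main(void)
    {
        printf("%u -> %u\n", ICMPV6_ECHO_REQUEST,
               reply_type(ICMPV6_ECHO_REQUEST));
        printf("%u -> %u\n", ICMPV6_EXT_ECHO_REQUEST,
               reply_type(ICMPV6_EXT_ECHO_REQUEST));
        return 0;
    }
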
index ff4f9eb..984050f 100644 (file)
@@ -1055,13 +1055,11 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
         * ip6_route_output will fail given src=any saddr, though, so
         * that's why we try it again later.
         */
-       if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
+       if (ipv6_addr_any(&fl6->saddr)) {
                struct fib6_info *from;
                struct rt6_info *rt;
-               bool had_dst = *dst != NULL;
 
-               if (!had_dst)
-                       *dst = ip6_route_output(net, sk, fl6);
+               *dst = ip6_route_output(net, sk, fl6);
                rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
 
                rcu_read_lock();
@@ -1078,7 +1076,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
                 * never existed and let the SA-enabled version take
                 * over.
                 */
-               if (!had_dst && (*dst)->error) {
+               if ((*dst)->error) {
                        dst_release(*dst);
                        *dst = NULL;
                }
@@ -1555,7 +1553,7 @@ emsgsize:
                        unsigned int datalen;
                        unsigned int fraglen;
                        unsigned int fraggap;
-                       unsigned int alloclen;
+                       unsigned int alloclen, alloc_extra;
                        unsigned int pagedlen;
 alloc_new_skb:
                        /* There's no room in the current skb */
@@ -1582,17 +1580,28 @@ alloc_new_skb:
                        fraglen = datalen + fragheaderlen;
                        pagedlen = 0;
 
+                       alloc_extra = hh_len;
+                       alloc_extra += dst_exthdrlen;
+                       alloc_extra += rt->dst.trailer_len;
+
+                       /* We just reserve space for fragment header.
+                        * Note: this may be overallocation if the message
+                        * (without MSG_MORE) fits into the MTU.
+                        */
+                       alloc_extra += sizeof(struct frag_hdr);
+
                        if ((flags & MSG_MORE) &&
                            !(rt->dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
-                       else if (!paged)
+                       else if (!paged &&
+                                (fraglen + alloc_extra < SKB_MAX_ALLOC ||
+                                 !(rt->dst.dev->features & NETIF_F_SG)))
                                alloclen = fraglen;
                        else {
                                alloclen = min_t(int, fraglen, MAX_HEADER);
                                pagedlen = fraglen - alloclen;
                        }
-
-                       alloclen += dst_exthdrlen;
+                       alloclen += alloc_extra;
 
                        if (datalen != length + fraggap) {
                                /*
@@ -1602,30 +1611,21 @@ alloc_new_skb:
                                datalen += rt->dst.trailer_len;
                        }
 
-                       alloclen += rt->dst.trailer_len;
                        fraglen = datalen + fragheaderlen;
 
-                       /*
-                        * We just reserve space for fragment header.
-                        * Note: this may be overallocation if the message
-                        * (without MSG_MORE) fits into the MTU.
-                        */
-                       alloclen += sizeof(struct frag_hdr);
-
                        copy = datalen - transhdrlen - fraggap - pagedlen;
                        if (copy < 0) {
                                err = -EINVAL;
                                goto error;
                        }
                        if (transhdrlen) {
-                               skb = sock_alloc_send_skb(sk,
-                                               alloclen + hh_len,
+                               skb = sock_alloc_send_skb(sk, alloclen,
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
                                if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
                                    2 * sk->sk_sndbuf)
-                                       skb = alloc_skb(alloclen + hh_len,
+                                       skb = alloc_skb(alloclen,
                                                        sk->sk_allocation);
                                if (unlikely(!skb))
                                        err = -ENOBUFS;
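
The refactor above hoists every fixed overhead (hh_len, dst_exthdrlen, the dst trailer and the reserved frag_hdr) into alloc_extra before the branch, so the !paged case can compare fraglen + alloc_extra against SKB_MAX_ALLOC and fall back to a paged skb instead of attempting one huge linear allocation. A worked sketch of the new sizing with made-up numbers; it is simplified (the real code also honours MSG_MORE, NETIF_F_SG and the paged state) and all names are local to the example:

    #include <stdio.h>

    #define SKB_MAX_ALLOC 16384     /* illustrative; the kernel derives this */
    #define FRAG_HDR_LEN  8         /* sizeof(struct frag_hdr) */

    int main(void)
    {
        unsigned int hh_len = 16, dst_exthdrlen = 0, trailer_len = 0;
        unsigned int fraglen = 60000;   /* datalen + fragheaderlen */
        unsigned int alloclen, pagedlen = 0;

        /* every fixed overhead is accounted once, up front */
        unsigned int alloc_extra = hh_len + dst_exthdrlen + trailer_len
                                   + FRAG_HDR_LEN;

        if (fraglen + alloc_extra < SKB_MAX_ALLOC) {
            alloclen = fraglen;             /* a linear skb is fine */
        } else {
            alloclen = 128;                 /* MAX_HEADER-sized linear part */
            pagedlen = fraglen - alloclen;  /* the rest goes to page frags */
        }
        alloclen += alloc_extra;

        printf("alloclen=%u pagedlen=%u\n", alloclen, pagedlen);
        return 0;
    }
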
index 288bafd..0b8a386 100644 (file)
@@ -837,6 +837,7 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
                skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
        } else {
                skb->dev = tunnel->dev;
+               skb_reset_mac_header(skb);
        }
 
        skb_reset_network_header(skb);
index daef890..15f984b 100644 (file)
@@ -172,14 +172,12 @@ static int ipcomp6_rcv_cb(struct sk_buff *skb, int err)
 }
 
 static const struct xfrm_type ipcomp6_type = {
-       .description    = "IPCOMP6",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_COMP,
        .init_state     = ipcomp6_init_state,
        .destructor     = ipcomp_destroy,
        .input          = ipcomp_input,
        .output         = ipcomp_output,
-       .hdr_offset     = xfrm6_find_1stfragopt,
 };
 
 static struct xfrm6_protocol ipcomp6_protocol = {
index 878fcec..aeb35d2 100644 (file)
@@ -247,54 +247,6 @@ static int mip6_destopt_reject(struct xfrm_state *x, struct sk_buff *skb,
        return err;
 }
 
-static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb,
-                              u8 **nexthdr)
-{
-       u16 offset = sizeof(struct ipv6hdr);
-       struct ipv6_opt_hdr *exthdr =
-                                  (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
-       const unsigned char *nh = skb_network_header(skb);
-       unsigned int packet_len = skb_tail_pointer(skb) -
-               skb_network_header(skb);
-       int found_rhdr = 0;
-
-       *nexthdr = &ipv6_hdr(skb)->nexthdr;
-
-       while (offset + 1 <= packet_len) {
-
-               switch (**nexthdr) {
-               case NEXTHDR_HOP:
-                       break;
-               case NEXTHDR_ROUTING:
-                       found_rhdr = 1;
-                       break;
-               case NEXTHDR_DEST:
-                       /*
-                        * HAO MUST NOT appear more than once.
-                        * XXX: It is better to try to find by the end of
-                        * XXX: packet if HAO exists.
-                        */
-                       if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) {
-                               net_dbg_ratelimited("mip6: hao exists already, override\n");
-                               return offset;
-                       }
-
-                       if (found_rhdr)
-                               return offset;
-
-                       break;
-               default:
-                       return offset;
-               }
-
-               offset += ipv6_optlen(exthdr);
-               *nexthdr = &exthdr->nexthdr;
-               exthdr = (struct ipv6_opt_hdr *)(nh + offset);
-       }
-
-       return offset;
-}
-
 static int mip6_destopt_init_state(struct xfrm_state *x)
 {
        if (x->id.spi) {
@@ -324,7 +276,6 @@ static void mip6_destopt_destroy(struct xfrm_state *x)
 }
 
 static const struct xfrm_type mip6_destopt_type = {
-       .description    = "MIP6DESTOPT",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_DSTOPTS,
        .flags          = XFRM_TYPE_NON_FRAGMENT | XFRM_TYPE_LOCAL_COADDR,
@@ -333,7 +284,6 @@ static const struct xfrm_type mip6_destopt_type = {
        .input          = mip6_destopt_input,
        .output         = mip6_destopt_output,
        .reject         = mip6_destopt_reject,
-       .hdr_offset     = mip6_destopt_offset,
 };
 
 static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb)
@@ -383,53 +333,6 @@ static int mip6_rthdr_output(struct xfrm_state *x, struct sk_buff *skb)
        return 0;
 }
 
-static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb,
-                            u8 **nexthdr)
-{
-       u16 offset = sizeof(struct ipv6hdr);
-       struct ipv6_opt_hdr *exthdr =
-                                  (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
-       const unsigned char *nh = skb_network_header(skb);
-       unsigned int packet_len = skb_tail_pointer(skb) -
-               skb_network_header(skb);
-       int found_rhdr = 0;
-
-       *nexthdr = &ipv6_hdr(skb)->nexthdr;
-
-       while (offset + 1 <= packet_len) {
-
-               switch (**nexthdr) {
-               case NEXTHDR_HOP:
-                       break;
-               case NEXTHDR_ROUTING:
-                       if (offset + 3 <= packet_len) {
-                               struct ipv6_rt_hdr *rt;
-                               rt = (struct ipv6_rt_hdr *)(nh + offset);
-                               if (rt->type != 0)
-                                       return offset;
-                       }
-                       found_rhdr = 1;
-                       break;
-               case NEXTHDR_DEST:
-                       if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
-                               return offset;
-
-                       if (found_rhdr)
-                               return offset;
-
-                       break;
-               default:
-                       return offset;
-               }
-
-               offset += ipv6_optlen(exthdr);
-               *nexthdr = &exthdr->nexthdr;
-               exthdr = (struct ipv6_opt_hdr *)(nh + offset);
-       }
-
-       return offset;
-}
-
 static int mip6_rthdr_init_state(struct xfrm_state *x)
 {
        if (x->id.spi) {
@@ -456,7 +359,6 @@ static void mip6_rthdr_destroy(struct xfrm_state *x)
 }
 
 static const struct xfrm_type mip6_rthdr_type = {
-       .description    = "MIP6RT",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ROUTING,
        .flags          = XFRM_TYPE_NON_FRAGMENT | XFRM_TYPE_REMOTE_COADDR,
@@ -464,7 +366,6 @@ static const struct xfrm_type mip6_rthdr_type = {
        .destructor     = mip6_rthdr_destroy,
        .input          = mip6_rthdr_input,
        .output         = mip6_rthdr_output,
-       .hdr_offset     = mip6_rthdr_offset,
 };
 
 static int __init mip6_init(void)
index e204163..92f3235 100644 (file)
@@ -135,6 +135,17 @@ void nft_fib6_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
 }
 EXPORT_SYMBOL_GPL(nft_fib6_eval_type);
 
+static bool nft_fib_v6_skip_icmpv6(const struct sk_buff *skb, u8 next, const struct ipv6hdr *iph)
+{
+       if (likely(next != IPPROTO_ICMPV6))
+               return false;
+
+       if (ipv6_addr_type(&iph->saddr) != IPV6_ADDR_ANY)
+               return false;
+
+       return ipv6_addr_type(&iph->daddr) & IPV6_ADDR_LINKLOCAL;
+}
+
 void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
                   const struct nft_pktinfo *pkt)
 {
@@ -163,10 +174,13 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
 
        lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif, iph);
 
-       if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
-           nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
-               nft_fib_store_result(dest, priv, nft_in(pkt));
-               return;
+       if (nft_hook(pkt) == NF_INET_PRE_ROUTING ||
+           nft_hook(pkt) == NF_INET_INGRESS) {
+               if (nft_fib_is_loopback(pkt->skb, nft_in(pkt)) ||
+                   nft_fib_v6_skip_icmpv6(pkt->skb, pkt->tprot, iph)) {
+                       nft_fib_store_result(dest, priv, nft_in(pkt));
+                       return;
+               }
        }
 
        *dest = 0;
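
nft_fib_v6_skip_icmpv6() above exempts DAD-style ICMPv6 traffic - an unspecified source talking to a link-local(-scope) destination - from the fib check, since such packets can never pass a reverse-path lookup yet must not be dropped; the exemption now also applies at the new NF_INET_INGRESS hook. A rough userspace approximation of the address test using the glibc macros (note that the kernel's ipv6_addr_type() also sets the link-local bit for link-local-scope multicast, hence the extra MC check here; the protocol check is omitted):

    #include <netinet/in.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* userspace analogue of nft_fib_v6_skip_icmpv6()'s address logic */
    static bool skip_fib_check(const struct in6_addr *saddr,
                               const struct in6_addr *daddr)
    {
        if (!IN6_IS_ADDR_UNSPECIFIED(saddr))
            return false;
        return IN6_IS_ADDR_LINKLOCAL(daddr) ||
               IN6_IS_ADDR_MC_LINKLOCAL(daddr);
    }

    int main(void)
    {
        /* ff02::1:ff00:1 - solicited-node multicast, as used by DAD */
        struct in6_addr any = IN6ADDR_ANY_INIT;
        struct in6_addr snm = { { { 0xff, 0x02, 0, 0, 0, 0, 0, 0,
                                    0, 0, 0, 1, 0xff, 0, 0, 1 } } };

        printf("skip=%d\n", skip_fib_check(&any, &snm));  /* skip=1 */
        return 0;
    }
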
index 4ff38cb..60bf3b8 100644 (file)
@@ -87,10 +87,10 @@ struct seg6_end_dt_info {
        int vrf_ifindex;
        int vrf_table;
 
-       /* tunneled packet proto and family (IPv4 or IPv6) */
-       __be16 proto;
+       /* tunneled packet family (IPv4 or IPv6).
+        * Protocol and header length are inferred from family.
+        */
        u16 family;
-       int hdrlen;
 };
 
 struct pcpu_seg6_local_counters {
@@ -521,19 +521,6 @@ static int __seg6_end_dt_vrf_build(struct seg6_local_lwt *slwt, const void *cfg,
        info->net = net;
        info->vrf_ifindex = vrf_ifindex;
 
-       switch (family) {
-       case AF_INET:
-               info->proto = htons(ETH_P_IP);
-               info->hdrlen = sizeof(struct iphdr);
-               break;
-       case AF_INET6:
-               info->proto = htons(ETH_P_IPV6);
-               info->hdrlen = sizeof(struct ipv6hdr);
-               break;
-       default:
-               return -EINVAL;
-       }
-
        info->family = family;
        info->mode = DT_VRF_MODE;
 
@@ -622,22 +609,44 @@ error:
 }
 
 static struct sk_buff *end_dt_vrf_core(struct sk_buff *skb,
-                                      struct seg6_local_lwt *slwt)
+                                      struct seg6_local_lwt *slwt, u16 family)
 {
        struct seg6_end_dt_info *info = &slwt->dt_info;
        struct net_device *vrf;
+       __be16 protocol;
+       int hdrlen;
 
        vrf = end_dt_get_vrf_rcu(skb, info);
        if (unlikely(!vrf))
                goto drop;
 
-       skb->protocol = info->proto;
+       switch (family) {
+       case AF_INET:
+               protocol = htons(ETH_P_IP);
+               hdrlen = sizeof(struct iphdr);
+               break;
+       case AF_INET6:
+               protocol = htons(ETH_P_IPV6);
+               hdrlen = sizeof(struct ipv6hdr);
+               break;
+       case AF_UNSPEC:
+               fallthrough;
+       default:
+               goto drop;
+       }
+
+       if (unlikely(info->family != AF_UNSPEC && info->family != family)) {
+               pr_warn_once("seg6local: SRv6 End.DT* family mismatch");
+               goto drop;
+       }
+
+       skb->protocol = protocol;
 
        skb_dst_drop(skb);
 
-       skb_set_transport_header(skb, info->hdrlen);
+       skb_set_transport_header(skb, hdrlen);
 
-       return end_dt_vrf_rcv(skb, info->family, vrf);
+       return end_dt_vrf_rcv(skb, family, vrf);
 
 drop:
        kfree_skb(skb);
@@ -656,7 +665,7 @@ static int input_action_end_dt4(struct sk_buff *skb,
        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto drop;
 
-       skb = end_dt_vrf_core(skb, slwt);
+       skb = end_dt_vrf_core(skb, slwt, AF_INET);
        if (!skb)
                /* packet has been processed and consumed by the VRF */
                return 0;
@@ -739,7 +748,7 @@ static int input_action_end_dt6(struct sk_buff *skb,
                goto legacy_mode;
 
        /* DT6_VRF_MODE */
-       skb = end_dt_vrf_core(skb, slwt);
+       skb = end_dt_vrf_core(skb, slwt, AF_INET6);
        if (!skb)
                /* packet has been processed and consumed by the VRF */
                return 0;
@@ -767,6 +776,36 @@ drop:
        return -EINVAL;
 }
 
+#ifdef CONFIG_NET_L3_MASTER_DEV
+static int seg6_end_dt46_build(struct seg6_local_lwt *slwt, const void *cfg,
+                              struct netlink_ext_ack *extack)
+{
+       return __seg6_end_dt_vrf_build(slwt, cfg, AF_UNSPEC, extack);
+}
+
+static int input_action_end_dt46(struct sk_buff *skb,
+                                struct seg6_local_lwt *slwt)
+{
+       unsigned int off = 0;
+       int nexthdr;
+
+       nexthdr = ipv6_find_hdr(skb, &off, -1, NULL, NULL);
+       if (unlikely(nexthdr < 0))
+               goto drop;
+
+       switch (nexthdr) {
+       case IPPROTO_IPIP:
+               return input_action_end_dt4(skb, slwt);
+       case IPPROTO_IPV6:
+               return input_action_end_dt6(skb, slwt);
+       }
+
+drop:
+       kfree_skb(skb);
+       return -EINVAL;
+}
+#endif
+
 /* push an SRH on top of the current one */
 static int input_action_end_b6(struct sk_buff *skb, struct seg6_local_lwt *slwt)
 {
@@ -969,6 +1008,17 @@ static struct seg6_action_desc seg6_action_table[] = {
                .input          = input_action_end_dt6,
        },
        {
+               .action         = SEG6_LOCAL_ACTION_END_DT46,
+               .attrs          = SEG6_F_ATTR(SEG6_LOCAL_VRFTABLE),
+               .optattrs       = SEG6_F_LOCAL_COUNTERS,
+#ifdef CONFIG_NET_L3_MASTER_DEV
+               .input          = input_action_end_dt46,
+               .slwt_ops       = {
+                                       .build_state = seg6_end_dt46_build,
+                                 },
+#endif
+       },
+       {
                .action         = SEG6_LOCAL_ACTION_END_B6,
                .attrs          = SEG6_F_ATTR(SEG6_LOCAL_SRH),
                .optattrs       = SEG6_F_LOCAL_COUNTERS,
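
End.DT46 combines End.DT4 and End.DT6: input_action_end_dt46() peeks at the inner protocol after decap and dispatches to the existing DT4/DT6 handlers, while seg6_end_dt46_build() passes AF_UNSPEC so end_dt_vrf_core() accepts either family at runtime. The dispatch keys off the standard protocol numbers; a trivial sketch:

    #include <netinet/in.h>     /* IPPROTO_IPIP (4), IPPROTO_IPV6 (41) */
    #include <sys/socket.h>     /* AF_INET, AF_INET6, AF_UNSPEC */
    #include <stdio.h>

    /* mirrors the nexthdr -> family dispatch in input_action_end_dt46() */
    static int inner_family(int nexthdr)
    {
        switch (nexthdr) {
        case IPPROTO_IPIP:  return AF_INET;   /* inner IPv4 */
        case IPPROTO_IPV6:  return AF_INET6;  /* inner IPv6 */
        default:            return AF_UNSPEC; /* caller drops the packet */
        }
    }

    int main(void)
    {
        printf("%d %d %d\n", inner_family(IPPROTO_IPIP),
               inner_family(IPPROTO_IPV6), inner_family(59));
        return 0;
    }
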
index e0a39b0..df5bea8 100644 (file)
@@ -710,6 +710,8 @@ static int ipip6_rcv(struct sk_buff *skb)
                 * old iph is no longer valid
                 */
                iph = (const struct iphdr *)skb_mac_header(skb);
+               skb_reset_mac_header(skb);
+
                err = IP_ECN_decapsulate(iph, skb);
                if (unlikely(err)) {
                        if (log_ecn_error)
@@ -780,6 +782,8 @@ static int sit_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
                        tpi = &ipip_tpi;
                if (iptunnel_pull_header(skb, 0, tpi->proto, false))
                        goto drop;
+               skb_reset_mac_header(skb);
+
                return ip_tunnel_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
        }
 
index 199b080..3fcd86f 100644 (file)
@@ -1598,6 +1598,9 @@ void udpv6_destroy_sock(struct sock *sk)
 {
        struct udp_sock *up = udp_sk(sk);
        lock_sock(sk);
+
+       /* protects from races with udp_abort() */
+       sock_set_flag(sk, SOCK_DEAD);
        udp_v6_flush_pending_frames(sk);
        release_sock(sk);
 
index 8b84d53..57fa27c 100644 (file)
 #include <net/ip6_route.h>
 #include <net/xfrm.h>
 
-int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
-                         u8 **prevhdr)
-{
-       return ip6_find_1stfragopt(skb, prevhdr);
-}
-EXPORT_SYMBOL(xfrm6_find_1stfragopt);
-
 void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
 {
        struct flowi6 fl6;
index f696d46..2b31112 100644 (file)
@@ -291,7 +291,6 @@ static void xfrm6_tunnel_destroy(struct xfrm_state *x)
 }
 
 static const struct xfrm_type xfrm6_tunnel_type = {
-       .description    = "IP6IP6",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_IPV6,
        .init_state     = xfrm6_tunnel_init_state,
index 1c572c8..6201965 100644 (file)
@@ -1066,11 +1066,6 @@ out_error:
                goto partial_message;
        }
 
-       if (skb_has_frag_list(head)) {
-               kfree_skb_list(skb_shinfo(head)->frag_list);
-               skb_shinfo(head)->frag_list = NULL;
-       }
-
        if (head != kcm->seq_skb)
                kfree_skb(head);
 
index ef9b4ac..de24a7d 100644 (file)
@@ -141,7 +141,6 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
        struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
        struct sock *sk;
        struct pfkey_sock *pfk;
-       int err;
 
        if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
@@ -150,10 +149,9 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
        if (protocol != PF_KEY_V2)
                return -EPROTONOSUPPORT;
 
-       err = -ENOMEM;
        sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto, kern);
        if (sk == NULL)
-               goto out;
+               return -ENOMEM;
 
        pfk = pfkey_sk(sk);
        mutex_init(&pfk->dump_lock);
@@ -169,8 +167,6 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
        pfkey_insert(sk);
 
        return 0;
-out:
-       return err;
 }
 
 static int pfkey_release(struct socket *sock)
index 7a99892..84cc773 100644 (file)
@@ -1442,6 +1442,38 @@ static void sta_apply_mesh_params(struct ieee80211_local *local,
 #endif
 }
 
+static void sta_apply_airtime_params(struct ieee80211_local *local,
+                                    struct sta_info *sta,
+                                    struct station_parameters *params)
+{
+       u8 ac;
+
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+               struct airtime_sched_info *air_sched = &local->airtime[ac];
+               struct airtime_info *air_info = &sta->airtime[ac];
+               struct txq_info *txqi;
+               u8 tid;
+
+               spin_lock_bh(&air_sched->lock);
+               for (tid = 0; tid < IEEE80211_NUM_TIDS + 1; tid++) {
+                       if (air_info->weight == params->airtime_weight ||
+                           !sta->sta.txq[tid] ||
+                           ac != ieee80211_ac_from_tid(tid))
+                               continue;
+
+                       airtime_weight_set(air_info, params->airtime_weight);
+
+                       txqi = to_txq_info(sta->sta.txq[tid]);
+                       if (RB_EMPTY_NODE(&txqi->schedule_order))
+                               continue;
+
+                       ieee80211_update_airtime_weight(local, air_sched,
+                                                       0, true);
+               }
+               spin_unlock_bh(&air_sched->lock);
+       }
+}
+
 static int sta_apply_parameters(struct ieee80211_local *local,
                                struct sta_info *sta,
                                struct station_parameters *params)
@@ -1629,7 +1661,8 @@ static int sta_apply_parameters(struct ieee80211_local *local,
                sta_apply_mesh_params(local, sta, params);
 
        if (params->airtime_weight)
-               sta->airtime_weight = params->airtime_weight;
+               sta_apply_airtime_params(local, sta, params);
+
 
        /* set the STA state after all sta info from usermode has been set */
        if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) ||
@@ -1693,15 +1726,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
            test_sta_flag(sta, WLAN_STA_ASSOC))
                rate_control_rate_init(sta);
 
-       err = sta_info_insert_rcu(sta);
-       if (err) {
-               rcu_read_unlock();
-               return err;
-       }
-
-       rcu_read_unlock();
-
-       return 0;
+       return sta_info_insert(sta);
 }
 
 static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
index 907bb1f..76fc36a 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * mac80211 - channel management
+ * Copyright 2020 - 2021 Intel Corporation
  */
 
 #include <linux/nl80211.h>
@@ -308,8 +309,8 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
  * the max of min required widths of all the interfaces bound to this
  * channel context.
  */
-void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
-                                     struct ieee80211_chanctx *ctx)
+static u32 _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
+                                            struct ieee80211_chanctx *ctx)
 {
        enum nl80211_chan_width max_bw;
        struct cfg80211_chan_def min_def;
@@ -326,7 +327,7 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
            ctx->conf.def.width == NL80211_CHAN_WIDTH_16 ||
            ctx->conf.radar_enabled) {
                ctx->conf.min_def = ctx->conf.def;
-               return;
+               return 0;
        }
 
        max_bw = ieee80211_get_chanctx_max_required_bw(local, &ctx->conf);
@@ -337,17 +338,21 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
                ieee80211_chandef_downgrade(&min_def);
 
        if (cfg80211_chandef_identical(&ctx->conf.min_def, &min_def))
-               return;
+               return 0;
 
        ctx->conf.min_def = min_def;
        if (!ctx->driver_present)
-               return;
+               return 0;
 
-       drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_MIN_WIDTH);
+       return IEEE80211_CHANCTX_CHANGE_MIN_WIDTH;
 }
 
+/* calling this function assumes that the station vif has already been
+ * updated to the latest changes by calling ieee80211_vif_update_chandef
+ */
 static void ieee80211_chan_bw_change(struct ieee80211_local *local,
-                                    struct ieee80211_chanctx *ctx)
+                                    struct ieee80211_chanctx *ctx,
+                                    bool narrowed)
 {
        struct sta_info *sta;
        struct ieee80211_supported_band *sband =
@@ -366,9 +371,16 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local,
                        continue;
 
                new_sta_bw = ieee80211_sta_cur_vht_bw(sta);
+
+               /* nothing changed */
                if (new_sta_bw == sta->sta.bandwidth)
                        continue;
 
+               /* only narrow the station BW in the narrowing pass, and
+                * only widen it in the widening pass */
+               if ((new_sta_bw < sta->sta.bandwidth) == !narrowed)
+                       continue;
+
                sta->sta.bandwidth = new_sta_bw;
                rate_control_rate_update(local, sband, sta,
                                         IEEE80211_RC_BW_CHANGED);
@@ -376,21 +388,34 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local,
        rcu_read_unlock();
 }
 
-static void ieee80211_change_chanctx(struct ieee80211_local *local,
-                                    struct ieee80211_chanctx *ctx,
-                                    const struct cfg80211_chan_def *chandef)
+/*
+ * recalc the min required chan width of the channel context, which is
+ * the max of min required widths of all the interfaces bound to this
+ * channel context.
+ */
+void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
+                                     struct ieee80211_chanctx *ctx)
 {
-       enum nl80211_chan_width width;
+       u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx);
 
-       if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) {
-               ieee80211_recalc_chanctx_min_def(local, ctx);
+       if (!changed)
                return;
-       }
 
-       WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef));
+       /* check if the BW was narrowed */
+       ieee80211_chan_bw_change(local, ctx, true);
 
-       width = ctx->conf.def.width;
-       ctx->conf.def = *chandef;
+       drv_change_chanctx(local, ctx, changed);
+
+       /* check if the BW was widened */
+       ieee80211_chan_bw_change(local, ctx, false);
+}
+
+static void ieee80211_change_chanctx(struct ieee80211_local *local,
+                                    struct ieee80211_chanctx *ctx,
+                                    struct ieee80211_chanctx *old_ctx,
+                                    const struct cfg80211_chan_def *chandef)
+{
+       u32 changed;
 
        /* expected to handle only 20/40/80/160 channel widths */
        switch (chandef->width) {
@@ -405,19 +430,33 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local,
                WARN_ON(1);
        }
 
-       if (chandef->width < width)
-               ieee80211_chan_bw_change(local, ctx);
+       /* Check if the BW was narrowed - do this _before_ calling
+        * recalc_chanctx_min_def, since that may return early, e.g. when a
+        * new context was just added with all parameters already up to date.
+        */
+       ieee80211_chan_bw_change(local, old_ctx, true);
+
+       if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) {
+               ieee80211_recalc_chanctx_min_def(local, ctx);
+               return;
+       }
 
-       drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_WIDTH);
-       ieee80211_recalc_chanctx_min_def(local, ctx);
+       WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef));
+
+       ctx->conf.def = *chandef;
+
+       /* check if min chanctx also changed */
+       changed = IEEE80211_CHANCTX_CHANGE_WIDTH |
+                 _ieee80211_recalc_chanctx_min_def(local, ctx);
+       drv_change_chanctx(local, ctx, changed);
 
        if (!local->use_chanctx) {
                local->_oper_chandef = *chandef;
                ieee80211_hw_config(local, 0);
        }
 
-       if (chandef->width > width)
-               ieee80211_chan_bw_change(local, ctx);
+       /* check if the BW was widened */
+       ieee80211_chan_bw_change(local, old_ctx, false);
 }
 
 static struct ieee80211_chanctx *
@@ -450,7 +489,7 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
                if (!compat)
                        continue;
 
-               ieee80211_change_chanctx(local, ctx, compat);
+               ieee80211_change_chanctx(local, ctx, ctx, compat);
 
                return ctx;
        }
@@ -679,7 +718,7 @@ void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
        if (!compat)
                return;
 
-       ieee80211_change_chanctx(local, ctx, compat);
+       ieee80211_change_chanctx(local, ctx, ctx, compat);
 }
 
 static void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
@@ -1107,13 +1146,12 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
        if (WARN_ON(!chandef))
                return -EINVAL;
 
-       if (old_ctx->conf.def.width > new_ctx->conf.def.width)
-               ieee80211_chan_bw_change(local, new_ctx);
+       if (sdata->vif.bss_conf.chandef.width != sdata->reserved_chandef.width)
+               changed = BSS_CHANGED_BANDWIDTH;
 
-       ieee80211_change_chanctx(local, new_ctx, chandef);
+       ieee80211_vif_update_chandef(sdata, &sdata->reserved_chandef);
 
-       if (old_ctx->conf.def.width < new_ctx->conf.def.width)
-               ieee80211_chan_bw_change(local, new_ctx);
+       ieee80211_change_chanctx(local, new_ctx, old_ctx, chandef);
 
        vif_chsw[0].vif = &sdata->vif;
        vif_chsw[0].old_ctx = &old_ctx->conf;
@@ -1142,14 +1180,9 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
        if (ieee80211_chanctx_refcount(local, old_ctx) == 0)
                ieee80211_free_chanctx(local, old_ctx);
 
-       if (sdata->vif.bss_conf.chandef.width != sdata->reserved_chandef.width)
-               changed = BSS_CHANGED_BANDWIDTH;
-
-       ieee80211_vif_update_chandef(sdata, &sdata->reserved_chandef);
-
+       ieee80211_recalc_chanctx_min_def(local, new_ctx);
        ieee80211_recalc_smps_chanctx(local, new_ctx);
        ieee80211_recalc_radar_chanctx(local, new_ctx);
-       ieee80211_recalc_chanctx_min_def(local, new_ctx);
 
        if (changed)
                ieee80211_bss_info_change_notify(sdata, changed);
@@ -1188,7 +1221,7 @@ ieee80211_vif_use_reserved_assign(struct ieee80211_sub_if_data *sdata)
        if (WARN_ON(!chandef))
                return -EINVAL;
 
-       ieee80211_change_chanctx(local, new_ctx, chandef);
+       ieee80211_change_chanctx(local, new_ctx, new_ctx, chandef);
 
        list_del(&sdata->reserved_chanctx_list);
        sdata->reserved_chanctx = NULL;
@@ -1505,7 +1538,6 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
                ieee80211_recalc_smps_chanctx(local, ctx);
                ieee80211_recalc_radar_chanctx(local, ctx);
                ieee80211_recalc_chanctx_min_def(local, ctx);
-               ieee80211_chan_bw_change(local, ctx);
 
                list_for_each_entry_safe(sdata, sdata_tmp, &ctx->reserved_vifs,
                                         reserved_chanctx_list) {
index 9245c04..8dbfe32 100644 (file)
@@ -4,7 +4,7 @@
  *
  * Copyright 2007      Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2019, 2021 Intel Corporation
  */
 
 #include <linux/debugfs.h>
@@ -216,14 +216,14 @@ static ssize_t aql_txq_limit_read(struct file *file,
                        "VI     %u              %u\n"
                        "BE     %u              %u\n"
                        "BK     %u              %u\n",
-                       local->aql_txq_limit_low[IEEE80211_AC_VO],
-                       local->aql_txq_limit_high[IEEE80211_AC_VO],
-                       local->aql_txq_limit_low[IEEE80211_AC_VI],
-                       local->aql_txq_limit_high[IEEE80211_AC_VI],
-                       local->aql_txq_limit_low[IEEE80211_AC_BE],
-                       local->aql_txq_limit_high[IEEE80211_AC_BE],
-                       local->aql_txq_limit_low[IEEE80211_AC_BK],
-                       local->aql_txq_limit_high[IEEE80211_AC_BK]);
+                       local->airtime[IEEE80211_AC_VO].aql_txq_limit_low,
+                       local->airtime[IEEE80211_AC_VO].aql_txq_limit_high,
+                       local->airtime[IEEE80211_AC_VI].aql_txq_limit_low,
+                       local->airtime[IEEE80211_AC_VI].aql_txq_limit_high,
+                       local->airtime[IEEE80211_AC_BE].aql_txq_limit_low,
+                       local->airtime[IEEE80211_AC_BE].aql_txq_limit_high,
+                       local->airtime[IEEE80211_AC_BK].aql_txq_limit_low,
+                       local->airtime[IEEE80211_AC_BK].aql_txq_limit_high);
        return simple_read_from_buffer(user_buf, count, ppos,
                                       buf, len);
 }
@@ -255,11 +255,11 @@ static ssize_t aql_txq_limit_write(struct file *file,
        if (ac >= IEEE80211_NUM_ACS)
                return -EINVAL;
 
-       q_limit_low_old = local->aql_txq_limit_low[ac];
-       q_limit_high_old = local->aql_txq_limit_high[ac];
+       q_limit_low_old = local->airtime[ac].aql_txq_limit_low;
+       q_limit_high_old = local->airtime[ac].aql_txq_limit_high;
 
-       local->aql_txq_limit_low[ac] = q_limit_low;
-       local->aql_txq_limit_high[ac] = q_limit_high;
+       local->airtime[ac].aql_txq_limit_low = q_limit_low;
+       local->airtime[ac].aql_txq_limit_high = q_limit_high;
 
        mutex_lock(&local->sta_mtx);
        list_for_each_entry(sta, &local->sta_list, list) {
@@ -382,15 +382,62 @@ static const struct file_operations force_tx_status_ops = {
        .llseek = default_llseek,
 };
 
+static ssize_t airtime_read(struct file *file,
+                           char __user *user_buf,
+                           size_t count,
+                           loff_t *ppos)
+{
+       struct ieee80211_local *local = file->private_data;
+       char buf[200];
+       u64 v_t[IEEE80211_NUM_ACS];
+       u64 wt[IEEE80211_NUM_ACS];
+       int len = 0, ac;
+
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+               spin_lock_bh(&local->airtime[ac].lock);
+               v_t[ac] = local->airtime[ac].v_t;
+               wt[ac] = local->airtime[ac].weight_sum;
+               spin_unlock_bh(&local->airtime[ac].lock);
+       }
+       len = scnprintf(buf, sizeof(buf),
+                       "\tVO         VI         BE         BK\n"
+                       "Virt-t\t%-10llu %-10llu %-10llu %-10llu\n"
+                       "Weight\t%-10llu %-10llu %-10llu %-10llu\n",
+                       v_t[0],
+                       v_t[1],
+                       v_t[2],
+                       v_t[3],
+                       wt[0],
+                       wt[1],
+                       wt[2],
+                       wt[3]);
+
+       return simple_read_from_buffer(user_buf, count, ppos,
+                                      buf, len);
+}
+
+static const struct file_operations airtime_ops = {
+       .read = airtime_read,
+       .open = simple_open,
+       .llseek = default_llseek,
+};
+
 #ifdef CONFIG_PM
 static ssize_t reset_write(struct file *file, const char __user *user_buf,
                           size_t count, loff_t *ppos)
 {
        struct ieee80211_local *local = file->private_data;
+       int ret;
 
        rtnl_lock();
+       wiphy_lock(local->hw.wiphy);
        __ieee80211_suspend(&local->hw, NULL);
-       __ieee80211_resume(&local->hw);
+       ret = __ieee80211_resume(&local->hw);
+       wiphy_unlock(local->hw.wiphy);
+
+       if (ret)
+               cfg80211_shutdown_all_interfaces(local->hw.wiphy);
+
        rtnl_unlock();
 
        return count;
@@ -625,7 +672,11 @@ void debugfs_hw_add(struct ieee80211_local *local)
        if (local->ops->wake_tx_queue)
                DEBUGFS_ADD_MODE(aqm, 0600);
 
-       DEBUGFS_ADD_MODE(airtime_flags, 0600);
+       if (wiphy_ext_feature_isset(local->hw.wiphy,
+                                   NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) {
+               DEBUGFS_ADD_MODE(airtime, 0600);
+               DEBUGFS_ADD_MODE(airtime_flags, 0600);
+       }
 
        DEBUGFS_ADD(aql_txq_limit);
        debugfs_create_u32("aql_threshold", 0600,
index 0ad3860..db724fc 100644 (file)
@@ -57,7 +57,6 @@ static ssize_t ieee80211_if_write(
                return -EFAULT;
        buf[count] = '\0';
 
-       ret = -ENODEV;
        rtnl_lock();
        ret = (*write)(sdata, buf, count);
        rtnl_unlock();
@@ -513,6 +512,34 @@ static ssize_t ieee80211_if_fmt_aqm(
 }
 IEEE80211_IF_FILE_R(aqm);
 
+static ssize_t ieee80211_if_fmt_airtime(
+       const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_txq *txq = sdata->vif.txq;
+       struct airtime_info *air_info;
+       int len;
+
+       if (!txq)
+               return 0;
+
+       spin_lock_bh(&local->airtime[txq->ac].lock);
+       air_info = to_airtime_info(txq);
+       len = scnprintf(buf,
+                       buflen,
+                       "RX: %llu us\nTX: %llu us\nWeight: %u\n"
+                       "Virt-T: %lld us\n",
+                       air_info->rx_airtime,
+                       air_info->tx_airtime,
+                       air_info->weight,
+                       air_info->v_t);
+       spin_unlock_bh(&local->airtime[txq->ac].lock);
+
+       return len;
+}
+
+IEEE80211_IF_FILE_R(airtime);
+
 IEEE80211_IF_FILE(multicast_to_unicast, u.ap.multicast_to_unicast, HEX);
 
 /* IBSS attributes */
@@ -658,8 +685,10 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
 
        if (sdata->local->ops->wake_tx_queue &&
            sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
-           sdata->vif.type != NL80211_IFTYPE_NAN)
+           sdata->vif.type != NL80211_IFTYPE_NAN) {
                DEBUGFS_ADD(aqm);
+               DEBUGFS_ADD(airtime);
+       }
 }
 
 static void add_sta_files(struct ieee80211_sub_if_data *sdata)
index 936c9df..8be28cf 100644 (file)
@@ -202,7 +202,7 @@ static ssize_t sta_airtime_read(struct file *file, char __user *userbuf,
        size_t bufsz = 400;
        char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
        u64 rx_airtime = 0, tx_airtime = 0;
-       s64 deficit[IEEE80211_NUM_ACS];
+       u64 v_t[IEEE80211_NUM_ACS];
        ssize_t rv;
        int ac;
 
@@ -210,18 +210,18 @@ static ssize_t sta_airtime_read(struct file *file, char __user *userbuf,
                return -ENOMEM;
 
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
-               spin_lock_bh(&local->active_txq_lock[ac]);
+               spin_lock_bh(&local->airtime[ac].lock);
                rx_airtime += sta->airtime[ac].rx_airtime;
                tx_airtime += sta->airtime[ac].tx_airtime;
-               deficit[ac] = sta->airtime[ac].deficit;
-               spin_unlock_bh(&local->active_txq_lock[ac]);
+               v_t[ac] = sta->airtime[ac].v_t;
+               spin_unlock_bh(&local->airtime[ac].lock);
        }
 
        p += scnprintf(p, bufsz + buf - p,
                "RX: %llu us\nTX: %llu us\nWeight: %u\n"
-               "Deficit: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n",
-               rx_airtime, tx_airtime, sta->airtime_weight,
-               deficit[0], deficit[1], deficit[2], deficit[3]);
+               "Virt-T: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n",
+               rx_airtime, tx_airtime, sta->airtime[0].weight,
+               v_t[0], v_t[1], v_t[2], v_t[3]);
 
        rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
        kfree(buf);
@@ -236,11 +236,11 @@ static ssize_t sta_airtime_write(struct file *file, const char __user *userbuf,
        int ac;
 
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
-               spin_lock_bh(&local->active_txq_lock[ac]);
+               spin_lock_bh(&local->airtime[ac].lock);
                sta->airtime[ac].rx_airtime = 0;
                sta->airtime[ac].tx_airtime = 0;
-               sta->airtime[ac].deficit = sta->airtime_weight;
-               spin_unlock_bh(&local->active_txq_lock[ac]);
+               sta->airtime[ac].v_t = 0;
+               spin_unlock_bh(&local->airtime[ac].lock);
        }
 
        return count;
@@ -263,10 +263,10 @@ static ssize_t sta_aql_read(struct file *file, char __user *userbuf,
                return -ENOMEM;
 
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
-               spin_lock_bh(&local->active_txq_lock[ac]);
+               spin_lock_bh(&local->airtime[ac].lock);
                q_limit_l[ac] = sta->airtime[ac].aql_limit_low;
                q_limit_h[ac] = sta->airtime[ac].aql_limit_high;
-               spin_unlock_bh(&local->active_txq_lock[ac]);
+               spin_unlock_bh(&local->airtime[ac].lock);
                q_depth[ac] = atomic_read(&sta->airtime[ac].aql_tx_pending);
        }
 
index 604ca59..bcb7cc0 100644 (file)
@@ -2,7 +2,7 @@
 /*
 * Portions of this file
 * Copyright(c) 2016 Intel Deutschland GmbH
-* Copyright (C) 2018 - 2019 Intel Corporation
+* Copyright (C) 2018 - 2019, 2021 Intel Corporation
 */
 
 #ifndef __MAC80211_DRIVER_OPS
@@ -821,7 +821,7 @@ drv_allow_buffered_frames(struct ieee80211_local *local,
 
 static inline void drv_mgd_prepare_tx(struct ieee80211_local *local,
                                      struct ieee80211_sub_if_data *sdata,
-                                     u16 duration)
+                                     struct ieee80211_prep_tx_info *info)
 {
        might_sleep();
 
@@ -829,9 +829,27 @@ static inline void drv_mgd_prepare_tx(struct ieee80211_local *local,
                return;
        WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION);
 
-       trace_drv_mgd_prepare_tx(local, sdata, duration);
+       trace_drv_mgd_prepare_tx(local, sdata, info->duration,
+                                info->subtype, info->success);
        if (local->ops->mgd_prepare_tx)
-               local->ops->mgd_prepare_tx(&local->hw, &sdata->vif, duration);
+               local->ops->mgd_prepare_tx(&local->hw, &sdata->vif, info);
+       trace_drv_return_void(local);
+}
+
+static inline void drv_mgd_complete_tx(struct ieee80211_local *local,
+                                      struct ieee80211_sub_if_data *sdata,
+                                      struct ieee80211_prep_tx_info *info)
+{
+       might_sleep();
+
+       if (!check_sdata_in_driver(sdata))
+               return;
+       WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION);
+
+       trace_drv_mgd_complete_tx(local, sdata, info->duration,
+                                 info->subtype, info->success);
+       if (local->ops->mgd_complete_tx)
+               local->ops->mgd_complete_tx(&local->hw, &sdata->vif, info);
        trace_drv_return_void(local);
 }
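
drv_mgd_prepare_tx() now takes a struct ieee80211_prep_tx_info instead of a bare duration, and gains a drv_mgd_complete_tx() counterpart so drivers can pair setup and teardown around management-frame exchanges; the trace calls above show the duration/subtype/success fields. A hedged sketch of the pairing discipline with local stand-in types - these are not the mac80211 definitions:

    #include <stdbool.h>
    #include <stdio.h>

    /* stand-in for struct ieee80211_prep_tx_info; the field names follow
     * the trace_drv_mgd_prepare_tx()/_complete_tx() arguments above */
    struct prep_tx_info {
        unsigned int duration;  /* expected on-channel time */
        unsigned int subtype;   /* mgmt frame subtype (auth, assoc, ...) */
        bool success;           /* outcome, reported at complete time */
    };

    static void driver_prepare_tx(const struct prep_tx_info *info)
    {
        printf("prepare: stay on channel %u for subtype %u\n",
               info->duration, info->subtype);
    }

    static void driver_complete_tx(const struct prep_tx_info *info)
    {
        printf("complete: subtype %u, success=%d\n",
               info->subtype, info->success);
    }

    int main(void)
    {
        struct prep_tx_info info = { .duration = 100, .subtype = 0xb };

        driver_prepare_tx(&info);   /* before the frame goes out */
        info.success = true;        /* filled in once the exchange ends */
        driver_complete_tx(&info);  /* always paired with a prepare */
        return 0;
    }
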
 
index 0c0b970..c05af70 100644 (file)
@@ -111,7 +111,7 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
                                  struct sta_info *sta)
 {
        struct ieee80211_sta_he_cap *he_cap = &sta->sta.he_cap;
-       struct ieee80211_sta_he_cap own_he_cap = sband->iftype_data->he_cap;
+       struct ieee80211_sta_he_cap own_he_cap;
        struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie;
        u8 he_ppe_size;
        u8 mcs_nss_size;
@@ -120,9 +120,13 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
 
        memset(he_cap, 0, sizeof(*he_cap));
 
-       if (!he_cap_ie || !ieee80211_get_he_sta_cap(sband))
+       if (!he_cap_ie ||
+           !ieee80211_get_he_iftype_cap(sband,
+                                        ieee80211_vif_type_p2p(&sdata->vif)))
                return;
 
+       own_he_cap = sband->iftype_data->he_cap;
+
        /* Make sure size is OK */
        mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap_ie_elem);
        he_ppe_size =
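
The hunk above fixes a use-before-check: own_he_cap was initialized from sband->iftype_data->he_cap before the code verified that the band actually carries HE capability data for this interface type, so the copy now happens only after the ieee80211_get_he_iftype_cap() check succeeds. The general pattern, as a small self-contained sketch (the types here are illustrative):

    #include <stdio.h>

    struct he_cap { int max_nss; };
    struct band { struct he_cap *iftype_data; };   /* may be NULL */

    static void setup_sta(const struct band *b)
    {
        struct he_cap own;

        /* check first; dereferencing b->iftype_data in an initializer
         * above would crash for bands without HE data */
        if (!b->iftype_data)
            return;

        own = *b->iftype_data;
        printf("own.max_nss=%d\n", own.max_nss);
    }

    int main(void)
    {
        struct he_cap cap = { .max_nss = 8 };
        struct band with = { .iftype_data = &cap }, without = { 0 };

        setup_sta(&with);
        setup_sta(&without);    /* safely does nothing */
        return 0;
    }
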
index 3d62a80..2eb7641 100644 (file)
@@ -9,7 +9,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
  * Copyright 2017      Intel Deutschland GmbH
- * Copyright(c) 2020 Intel Corporation
+ * Copyright(c) 2020-2021 Intel Corporation
  */
 
 #include <linux/ieee80211.h>
@@ -555,17 +555,15 @@ void ieee80211_request_smps(struct ieee80211_vif *vif,
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 
-       if (WARN_ON_ONCE(vif->type != NL80211_IFTYPE_STATION &&
-                        vif->type != NL80211_IFTYPE_AP))
+       if (WARN_ON_ONCE(vif->type != NL80211_IFTYPE_STATION))
                return;
 
-       if (vif->type == NL80211_IFTYPE_STATION) {
-               if (sdata->u.mgd.driver_smps_mode == smps_mode)
-                       return;
-               sdata->u.mgd.driver_smps_mode = smps_mode;
-               ieee80211_queue_work(&sdata->local->hw,
-                                    &sdata->u.mgd.request_smps_work);
-       }
+       if (sdata->u.mgd.driver_smps_mode == smps_mode)
+               return;
+
+       sdata->u.mgd.driver_smps_mode = smps_mode;
+       ieee80211_queue_work(&sdata->local->hw,
+                            &sdata->u.mgd.request_smps_work);
 }
 /* this might change ... don't want non-open drivers using it */
 EXPORT_SYMBOL_GPL(ieee80211_request_smps);
index 214404a..22549b9 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2015  Intel Mobile Communications GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 
 #ifndef IEEE80211_I_H
@@ -831,17 +831,16 @@ enum txq_info_flags {
  * @def_flow: used as a fallback flow when a packet destined to @tin hashes to
  *     a fq_flow which is already owned by a different tin
  * @def_cvars: codel vars for @def_flow
- * @frags: used to keep fragments created after dequeue
  * @schedule_order: used with ieee80211_local->active_txqs
- * @schedule_round: counter to prevent infinite loops on TXQ scheduling
+ * @frags: used to keep fragments created after dequeue
  */
 struct txq_info {
        struct fq_tin tin;
        struct codel_vars def_cvars;
        struct codel_stats cstats;
+       struct rb_node schedule_order;
+
        struct sk_buff_head frags;
-       struct list_head schedule_order;
-       u16 schedule_round;
        unsigned long flags;
 
        /* keep last! */
@@ -918,6 +917,8 @@ struct ieee80211_sub_if_data {
        struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
        struct mac80211_qos_map __rcu *qos_map;
 
+       struct airtime_info airtime[IEEE80211_NUM_ACS];
+
        struct work_struct csa_finalize_work;
        bool csa_block_tx; /* write-protected by sdata_lock and local->mtx */
        struct cfg80211_chan_def csa_chandef;
@@ -1130,6 +1131,44 @@ enum mac80211_scan_state {
        SCAN_ABORT,
 };
 
+/**
+ * struct airtime_sched_info - state used for airtime scheduling and AQL
+ *
+ * @lock: spinlock that protects all the fields in this struct
+ * @active_txqs: rbtree of currently backlogged queues, sorted by virtual time
+ * @schedule_pos: the current position maintained while a driver walks the tree
+ *                with ieee80211_next_txq()
+ * @active_list: list of struct airtime_info structs that were active within
+ *               the last AIRTIME_ACTIVE_DURATION (100 ms), used to compute
+ *               weight_sum
+ * @last_weight_update: used for rate limiting walking active_list
+ * @last_schedule_activity: tracks the last time a transmission was
+ *                          scheduled; used for catching up v_t if no
+ *                          stations are eligible for transmission.
+ * @v_t: global virtual time; queues with v_t < this are eligible for
+ *       transmission
+ * @weight_sum: total sum of all active stations used for dividing airtime
+ * @weight_sum_reciprocal: reciprocal of weight_sum (to avoid divisions in fast
+ *                         path - see comment above
+ *                         IEEE80211_RECIPROCAL_DIVISOR_64)
+ * @aql_txq_limit_low: AQL limit when total outstanding airtime
+ *                     is < IEEE80211_AQL_THRESHOLD
+ * @aql_txq_limit_high: AQL limit when total outstanding airtime
+ *                      is > IEEE80211_AQL_THRESHOLD
+ */
+struct airtime_sched_info {
+       spinlock_t lock;
+       struct rb_root_cached active_txqs;
+       struct rb_node *schedule_pos;
+       struct list_head active_list;
+       u64 last_weight_update;
+       u64 last_schedule_activity;
+       u64 v_t;
+       u64 weight_sum;
+       u64 weight_sum_reciprocal;
+       u32 aql_txq_limit_low;
+       u32 aql_txq_limit_high;
+};
 DECLARE_STATIC_KEY_FALSE(aql_disable);
 
 struct ieee80211_local {
@@ -1143,13 +1182,8 @@ struct ieee80211_local {
        struct codel_params cparams;
 
        /* protects active_txqs and txqi->schedule_order */
-       spinlock_t active_txq_lock[IEEE80211_NUM_ACS];
-       struct list_head active_txqs[IEEE80211_NUM_ACS];
-       u16 schedule_round[IEEE80211_NUM_ACS];
-
+       struct airtime_sched_info airtime[IEEE80211_NUM_ACS];
        u16 airtime_flags;
-       u32 aql_txq_limit_low[IEEE80211_NUM_ACS];
-       u32 aql_txq_limit_high[IEEE80211_NUM_ACS];
        u32 aql_threshold;
        atomic_t aql_total_pending_airtime;
 
@@ -1414,10 +1448,6 @@ struct ieee80211_local {
 
        /* extended capabilities provided by mac80211 */
        u8 ext_capa[8];
-
-       /* TDLS channel switch */
-       struct work_struct tdls_chsw_work;
-       struct sk_buff_head skb_queue_tdls_chsw;
 };
 
 static inline struct ieee80211_sub_if_data *
@@ -1442,7 +1472,7 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
        rcu_read_lock();
        chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
 
-       if (WARN_ON_ONCE(!chanctx_conf)) {
+       if (!chanctx_conf) {
                rcu_read_unlock();
                return NULL;
        }
@@ -1567,6 +1597,125 @@ static inline bool txq_has_queue(struct ieee80211_txq *txq)
        return !(skb_queue_empty(&txqi->frags) && !txqi->tin.backlog_packets);
 }
 
+static inline struct airtime_info *to_airtime_info(struct ieee80211_txq *txq)
+{
+       struct ieee80211_sub_if_data *sdata;
+       struct sta_info *sta;
+
+       if (txq->sta) {
+               sta = container_of(txq->sta, struct sta_info, sta);
+               return &sta->airtime[txq->ac];
+       }
+
+       sdata = vif_to_sdata(txq->vif);
+       return &sdata->airtime[txq->ac];
+}
+
+/* To avoid divisions in the fast path, we keep pre-computed reciprocals for
+ * airtime weight calculations. There are two different weights to keep track
+ * of: The per-station weight and the sum of weights per phy.
+ *
+ * For the per-station weights (kept in airtime_info below), we use 32-bit
+ * reciprocals with a divisor of 2^19. This lets us keep the multiplications and
+ * divisions for the station weights as 32-bit operations at the cost of a bit
+ * of rounding error for high weights; but the choice of divisor keeps rounding
+ * errors <10% for weights <2^15, assuming no more than 8ms of airtime is
+ * reported at a time.
+ *
+ * For the per-phy sum of weights the values can get higher, so we use 64-bit
+ * operations for those with a 32-bit divisor, which should avoid any
+ * significant rounding errors.
+ */
+#define IEEE80211_RECIPROCAL_DIVISOR_64 0x100000000ULL
+#define IEEE80211_RECIPROCAL_SHIFT_64 32
+#define IEEE80211_RECIPROCAL_DIVISOR_32 0x80000U
+#define IEEE80211_RECIPROCAL_SHIFT_32 19
+
+static inline void airtime_weight_set(struct airtime_info *air_info, u16 weight)
+{
+       if (air_info->weight == weight)
+               return;
+
+       air_info->weight = weight;
+       if (weight) {
+               air_info->weight_reciprocal =
+                       IEEE80211_RECIPROCAL_DIVISOR_32 / weight;
+       } else {
+               air_info->weight_reciprocal = 0;
+       }
+}
+
+static inline void airtime_weight_sum_set(struct airtime_sched_info *air_sched,
+                                         int weight_sum)
+{
+       if (air_sched->weight_sum == weight_sum)
+               return;
+
+       air_sched->weight_sum = weight_sum;
+       if (air_sched->weight_sum) {
+               air_sched->weight_sum_reciprocal = IEEE80211_RECIPROCAL_DIVISOR_64;
+               do_div(air_sched->weight_sum_reciprocal, air_sched->weight_sum);
+       } else {
+               air_sched->weight_sum_reciprocal = 0;
+       }
+}
+
+/* A problem when trying to enforce airtime fairness is that we want to divide
+ * the airtime between the currently *active* stations. However, basing this on
+ * the instantaneous queue state of stations doesn't work, as queues tend to
+ * oscillate very quickly between empty and occupied, leading to the scheduler
+ * thinking only a single station is active when deciding whether to allow
+ * transmission (and thus not throttling correctly).
+ *
+ * To fix this we use a timer-based notion of activity: a station is considered
+ * active if it has been scheduled within the last 100 ms; we keep a separate
+ * list of all the stations considered active in this manner, and lazily update
+ * the total weight of active stations from this list (filtering the stations in
+ * the list by their 'last active' time).
+ *
+ * We add one additional safeguard against stations that manage to get
+ * scheduled every 100 ms but don't transmit a lot of data, and thus don't use
+ * up any airtime. Such stations would be able to get priority for an extended
+ * period of time if they do start transmitting at full capacity again, and so
+ * we add an explicit maximum for how far behind a station is allowed to fall in
+ * the virtual airtime domain. This limit is set to a relatively high value of
+ * 20 ms because the main mechanism for catching up idle stations is the active
+ * state as described above; i.e., the hard limit should only be hit in
+ * pathological cases.
+ */
+#define AIRTIME_ACTIVE_DURATION (100 * NSEC_PER_MSEC)
+#define AIRTIME_MAX_BEHIND 20000 /* 20 ms */
+
+static inline bool airtime_is_active(struct airtime_info *air_info, u64 now)
+{
+       return air_info->last_scheduled >= now - AIRTIME_ACTIVE_DURATION;
+}
+
+static inline void airtime_set_active(struct airtime_sched_info *air_sched,
+                                     struct airtime_info *air_info, u64 now)
+{
+       air_info->last_scheduled = now;
+       air_sched->last_schedule_activity = now;
+       list_move_tail(&air_info->list, &air_sched->active_list);
+}
+
+static inline bool airtime_catchup_v_t(struct airtime_sched_info *air_sched,
+                                      u64 v_t, u64 now)
+{
+       air_sched->v_t = v_t;
+       return true;
+}
+
+static inline void init_airtime_info(struct airtime_info *air_info,
+                                    struct airtime_sched_info *air_sched)
+{
+       atomic_set(&air_info->aql_tx_pending, 0);
+       air_info->aql_limit_low = air_sched->aql_txq_limit_low;
+       air_info->aql_limit_high = air_sched->aql_txq_limit_high;
+       airtime_weight_set(air_info, IEEE80211_DEFAULT_AIRTIME_WEIGHT);
+       INIT_LIST_HEAD(&air_info->list);
+}
+
 static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
 {
        return ether_addr_equal(raddr, addr) ||
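
The reciprocal scheme added above turns the per-packet division by a
station's weight into one multiply and one shift. A minimal standalone
sketch of the 32-bit variant (hypothetical names, not the in-tree
helpers, which keep the precomputed value in struct airtime_info):

    #include <stdint.h>
    #include <stdio.h>

    #define RECIP_DIVISOR_32 0x80000U /* 2^19, as in the patch */
    #define RECIP_SHIFT_32   19

    int main(void)
    {
            /* computed once, whenever the weight changes */
            uint32_t weight = 256;
            uint32_t weight_reciprocal = RECIP_DIVISOR_32 / weight;

            /* fast path: airtime / weight becomes multiply + shift.
             * With <= 8 ms (8000 usec) of airtime reported at a time
             * and weight >= 1, the product stays below 2^32. */
            uint32_t airtime = 8000;
            uint32_t scaled = (airtime * weight_reciprocal) >> RECIP_SHIFT_32;

            printf("exact %u, via reciprocal %u\n", airtime / weight, scaled);
            return 0;
    }

The 64-bit variant is the same idea with a 2^32 divisor; the patch uses
do_div() there, so the division by the sum of weights stays cheap as well.
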
@@ -1809,6 +1958,14 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
                              u64 *cookie);
 int ieee80211_probe_mesh_link(struct wiphy *wiphy, struct net_device *dev,
                              const u8 *buf, size_t len);
+void ieee80211_resort_txq(struct ieee80211_hw *hw,
+                         struct ieee80211_txq *txq);
+void ieee80211_unschedule_txq(struct ieee80211_hw *hw,
+                             struct ieee80211_txq *txq,
+                             bool purge);
+void ieee80211_update_airtime_weight(struct ieee80211_local *local,
+                                    struct airtime_sched_info *air_sched,
+                                    u64 now, bool force);
 
 /* HT */
 void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
@@ -1879,7 +2036,6 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta);
 enum ieee80211_sta_rx_bandwidth
 ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width);
 enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta);
-void ieee80211_sta_set_rx_nss(struct sta_info *sta);
 void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
                                 struct ieee80211_mgmt *mgmt);
 u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
@@ -2287,9 +2443,13 @@ void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
                                          struct net_device *dev,
                                          const u8 *addr);
 void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata);
-void ieee80211_tdls_chsw_work(struct work_struct *wk);
 void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
                                      const u8 *peer, u16 reason);
+void
+ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
+                                     struct sk_buff *skb);
+
 const char *ieee80211_get_reason_code_string(u16 reason_code);
 u16 ieee80211_encode_usf(int val);
 u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
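
A hedged, kernel-style sketch of the lazy weight-sum update described in
the comment block above (the in-tree helper is
ieee80211_update_airtime_weight(), declared in this hunk, which
additionally takes the local pointer and a force flag): walk the active
list, expire stations past AIRTIME_ACTIVE_DURATION, and refresh the
cached sum and its reciprocal.

    static void airtime_recalc_weight_sum(struct airtime_sched_info *air_sched,
                                          u64 now)
    {
            struct airtime_info *air_info, *tmp;
            int weight_sum = 0;

            /* filter the list by 'last active' time */
            list_for_each_entry_safe(air_info, tmp,
                                     &air_sched->active_list, list) {
                    if (airtime_is_active(air_info, now))
                            weight_sum += air_info->weight;
                    else
                            list_del_init(&air_info->list);
            }

            /* refresh the cached sum and its 64-bit reciprocal */
            airtime_weight_sum_set(air_sched, weight_sum);
    }

Along the same lines, a station whose virtual time has fallen more than
AIRTIME_MAX_BEHIND behind air_sched->v_t can be pulled forward with
airtime_catchup_v_t() before it is scheduled again.
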
index 2e2f73a..1e5e9fc 100644
@@ -476,14 +476,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
                                   GFP_KERNEL);
        }
 
-       /* APs need special treatment */
        if (sdata->vif.type == NL80211_IFTYPE_AP) {
-               struct ieee80211_sub_if_data *vlan, *tmpsdata;
-
-               /* down all dependent devices, that is VLANs */
-               list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
-                                        u.vlan.list)
-                       dev_close(vlan->dev);
                WARN_ON(!list_empty(&sdata->u.ap.vlans));
        } else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
                /* remove all packets in parent bc_buf pointing to this dev */
@@ -641,6 +634,15 @@ static int ieee80211_stop(struct net_device *dev)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
+       /* close all dependent VLAN interfaces before locking wiphy */
+       if (sdata->vif.type == NL80211_IFTYPE_AP) {
+               struct ieee80211_sub_if_data *vlan, *tmpsdata;
+
+               list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
+                                        u.vlan.list)
+                       dev_close(vlan->dev);
+       }
+
        wiphy_lock(sdata->local->hw.wiphy);
        ieee80211_do_stop(sdata, true);
        wiphy_unlock(sdata->local->hw.wiphy);
@@ -1316,13 +1318,130 @@ static void ieee80211_if_setup_no_queue(struct net_device *dev)
        dev->priv_flags |= IFF_NO_QUEUE;
 }
 
+static void ieee80211_iface_process_skb(struct ieee80211_local *local,
+                                       struct ieee80211_sub_if_data *sdata,
+                                       struct sk_buff *skb)
+{
+       struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+       if (ieee80211_is_action(mgmt->frame_control) &&
+           mgmt->u.action.category == WLAN_CATEGORY_BACK) {
+               struct sta_info *sta;
+               int len = skb->len;
+
+               mutex_lock(&local->sta_mtx);
+               sta = sta_info_get_bss(sdata, mgmt->sa);
+               if (sta) {
+                       switch (mgmt->u.action.u.addba_req.action_code) {
+                       case WLAN_ACTION_ADDBA_REQ:
+                               ieee80211_process_addba_request(local, sta,
+                                                               mgmt, len);
+                               break;
+                       case WLAN_ACTION_ADDBA_RESP:
+                               ieee80211_process_addba_resp(local, sta,
+                                                            mgmt, len);
+                               break;
+                       case WLAN_ACTION_DELBA:
+                               ieee80211_process_delba(sdata, sta,
+                                                       mgmt, len);
+                               break;
+                       default:
+                               WARN_ON(1);
+                               break;
+                       }
+               }
+               mutex_unlock(&local->sta_mtx);
+       } else if (ieee80211_is_action(mgmt->frame_control) &&
+                  mgmt->u.action.category == WLAN_CATEGORY_VHT) {
+               switch (mgmt->u.action.u.vht_group_notif.action_code) {
+               case WLAN_VHT_ACTION_OPMODE_NOTIF: {
+                       struct ieee80211_rx_status *status;
+                       enum nl80211_band band;
+                       struct sta_info *sta;
+                       u8 opmode;
+
+                       status = IEEE80211_SKB_RXCB(skb);
+                       band = status->band;
+                       opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
+
+                       mutex_lock(&local->sta_mtx);
+                       sta = sta_info_get_bss(sdata, mgmt->sa);
+
+                       if (sta)
+                               ieee80211_vht_handle_opmode(sdata, sta, opmode,
+                                                           band);
+
+                       mutex_unlock(&local->sta_mtx);
+                       break;
+               }
+               case WLAN_VHT_ACTION_GROUPID_MGMT:
+                       ieee80211_process_mu_groups(sdata, mgmt);
+                       break;
+               default:
+                       WARN_ON(1);
+                       break;
+               }
+       } else if (ieee80211_is_ext(mgmt->frame_control)) {
+               if (sdata->vif.type == NL80211_IFTYPE_STATION)
+                       ieee80211_sta_rx_queued_ext(sdata, skb);
+               else
+                       WARN_ON(1);
+       } else if (ieee80211_is_data_qos(mgmt->frame_control)) {
+               struct ieee80211_hdr *hdr = (void *)mgmt;
+               struct sta_info *sta;
+
+               /*
+                * The frame isn't mgmt, but frame_control is at
+                * the same offset in a QoS data header, so the
+                * check above is still correct.
+                *
+                * Warn if we see other data frame types here;
+                * they must not get here.
+                */
+               WARN_ON(hdr->frame_control &
+                               cpu_to_le16(IEEE80211_STYPE_NULLFUNC));
+               WARN_ON(!(hdr->seq_ctrl &
+                               cpu_to_le16(IEEE80211_SCTL_FRAG)));
+               /*
+                * This was a fragment of a frame, received while
+                * a block-ack session was active. That cannot be
+                * right, so terminate the session.
+                */
+               mutex_lock(&local->sta_mtx);
+               sta = sta_info_get_bss(sdata, mgmt->sa);
+               if (sta) {
+                       u16 tid = ieee80211_get_tid(hdr);
+
+                       __ieee80211_stop_rx_ba_session(
+                               sta, tid, WLAN_BACK_RECIPIENT,
+                               WLAN_REASON_QSTA_REQUIRE_SETUP,
+                               true);
+               }
+               mutex_unlock(&local->sta_mtx);
+       } else switch (sdata->vif.type) {
+       case NL80211_IFTYPE_STATION:
+               ieee80211_sta_rx_queued_mgmt(sdata, skb);
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               ieee80211_ibss_rx_queued_mgmt(sdata, skb);
+               break;
+       case NL80211_IFTYPE_MESH_POINT:
+               if (!ieee80211_vif_is_mesh(&sdata->vif))
+                       break;
+               ieee80211_mesh_rx_queued_mgmt(sdata, skb);
+               break;
+       default:
+               WARN(1, "frame for unexpected interface type");
+               break;
+       }
+}
+
 static void ieee80211_iface_work(struct work_struct *work)
 {
        struct ieee80211_sub_if_data *sdata =
                container_of(work, struct ieee80211_sub_if_data, work);
        struct ieee80211_local *local = sdata->local;
        struct sk_buff *skb;
-       struct sta_info *sta;
 
        if (!ieee80211_sdata_running(sdata))
                return;
@@ -1335,116 +1454,12 @@ static void ieee80211_iface_work(struct work_struct *work)
 
        /* first process frames */
        while ((skb = skb_dequeue(&sdata->skb_queue))) {
-               struct ieee80211_mgmt *mgmt = (void *)skb->data;
-
                kcov_remote_start_common(skb_get_kcov_handle(skb));
-               if (ieee80211_is_action(mgmt->frame_control) &&
-                   mgmt->u.action.category == WLAN_CATEGORY_BACK) {
-                       int len = skb->len;
-
-                       mutex_lock(&local->sta_mtx);
-                       sta = sta_info_get_bss(sdata, mgmt->sa);
-                       if (sta) {
-                               switch (mgmt->u.action.u.addba_req.action_code) {
-                               case WLAN_ACTION_ADDBA_REQ:
-                                       ieee80211_process_addba_request(
-                                                       local, sta, mgmt, len);
-                                       break;
-                               case WLAN_ACTION_ADDBA_RESP:
-                                       ieee80211_process_addba_resp(local, sta,
-                                                                    mgmt, len);
-                                       break;
-                               case WLAN_ACTION_DELBA:
-                                       ieee80211_process_delba(sdata, sta,
-                                                               mgmt, len);
-                                       break;
-                               default:
-                                       WARN_ON(1);
-                                       break;
-                               }
-                       }
-                       mutex_unlock(&local->sta_mtx);
-               } else if (ieee80211_is_action(mgmt->frame_control) &&
-                          mgmt->u.action.category == WLAN_CATEGORY_VHT) {
-                       switch (mgmt->u.action.u.vht_group_notif.action_code) {
-                       case WLAN_VHT_ACTION_OPMODE_NOTIF: {
-                               struct ieee80211_rx_status *status;
-                               enum nl80211_band band;
-                               u8 opmode;
-
-                               status = IEEE80211_SKB_RXCB(skb);
-                               band = status->band;
-                               opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
-
-                               mutex_lock(&local->sta_mtx);
-                               sta = sta_info_get_bss(sdata, mgmt->sa);
-
-                               if (sta)
-                                       ieee80211_vht_handle_opmode(sdata, sta,
-                                                                   opmode,
-                                                                   band);
-
-                               mutex_unlock(&local->sta_mtx);
-                               break;
-                       }
-                       case WLAN_VHT_ACTION_GROUPID_MGMT:
-                               ieee80211_process_mu_groups(sdata, mgmt);
-                               break;
-                       default:
-                               WARN_ON(1);
-                               break;
-                       }
-               } else if (ieee80211_is_ext(mgmt->frame_control)) {
-                       if (sdata->vif.type == NL80211_IFTYPE_STATION)
-                               ieee80211_sta_rx_queued_ext(sdata, skb);
-                       else
-                               WARN_ON(1);
-               } else if (ieee80211_is_data_qos(mgmt->frame_control)) {
-                       struct ieee80211_hdr *hdr = (void *)mgmt;
-                       /*
-                        * So the frame isn't mgmt, but frame_control
-                        * is at the right place anyway, of course, so
-                        * the if statement is correct.
-                        *
-                        * Warn if we have other data frame types here,
-                        * they must not get here.
-                        */
-                       WARN_ON(hdr->frame_control &
-                                       cpu_to_le16(IEEE80211_STYPE_NULLFUNC));
-                       WARN_ON(!(hdr->seq_ctrl &
-                                       cpu_to_le16(IEEE80211_SCTL_FRAG)));
-                       /*
-                        * This was a fragment of a frame, received while
-                        * a block-ack session was active. That cannot be
-                        * right, so terminate the session.
-                        */
-                       mutex_lock(&local->sta_mtx);
-                       sta = sta_info_get_bss(sdata, mgmt->sa);
-                       if (sta) {
-                               u16 tid = ieee80211_get_tid(hdr);
 
-                               __ieee80211_stop_rx_ba_session(
-                                       sta, tid, WLAN_BACK_RECIPIENT,
-                                       WLAN_REASON_QSTA_REQUIRE_SETUP,
-                                       true);
-                       }
-                       mutex_unlock(&local->sta_mtx);
-               } else switch (sdata->vif.type) {
-               case NL80211_IFTYPE_STATION:
-                       ieee80211_sta_rx_queued_mgmt(sdata, skb);
-                       break;
-               case NL80211_IFTYPE_ADHOC:
-                       ieee80211_ibss_rx_queued_mgmt(sdata, skb);
-                       break;
-               case NL80211_IFTYPE_MESH_POINT:
-                       if (!ieee80211_vif_is_mesh(&sdata->vif))
-                               break;
-                       ieee80211_mesh_rx_queued_mgmt(sdata, skb);
-                       break;
-               default:
-                       WARN(1, "frame for unexpected interface type");
-                       break;
-               }
+               if (skb->protocol == cpu_to_be16(ETH_P_TDLS))
+                       ieee80211_process_tdls_channel_switch(sdata, skb);
+               else
+                       ieee80211_iface_process_skb(local, sdata, skb);
 
                kfree_skb(skb);
                kcov_remote_stop();
@@ -1591,6 +1606,9 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
 
        switch (sdata->vif.type) {
        case NL80211_IFTYPE_AP:
+               if (!list_empty(&sdata->u.ap.vlans))
+                       return -EBUSY;
+               break;
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_ADHOC:
        case NL80211_IFTYPE_OCB:
@@ -1959,6 +1977,9 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
                }
        }
 
+       for (i = 0; i < IEEE80211_NUM_ACS; i++)
+               init_airtime_info(&sdata->airtime[i], &local->airtime[i]);
+
        ieee80211_set_default_queues(sdata);
 
        sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
index b275c88..6de8d0a 100644
@@ -259,7 +259,6 @@ static void tpt_trig_timer(struct timer_list *t)
 {
        struct tpt_led_trigger *tpt_trig = from_timer(tpt_trig, t, timer);
        struct ieee80211_local *local = tpt_trig->local;
-       struct led_classdev *led_cdev;
        unsigned long on, off, tpt;
        int i;
 
@@ -283,10 +282,7 @@ static void tpt_trig_timer(struct timer_list *t)
                }
        }
 
-       read_lock(&local->tpt_led.leddev_list_lock);
-       list_for_each_entry(led_cdev, &local->tpt_led.led_cdevs, trig_list)
-               led_blink_set(led_cdev, &on, &off);
-       read_unlock(&local->tpt_led.leddev_list_lock);
+       led_trigger_blink(&local->tpt_led, &on, &off);
 }
 
 const char *
@@ -341,7 +337,6 @@ static void ieee80211_start_tpt_led_trig(struct ieee80211_local *local)
 static void ieee80211_stop_tpt_led_trig(struct ieee80211_local *local)
 {
        struct tpt_led_trigger *tpt_trig = local->tpt_led_trigger;
-       struct led_classdev *led_cdev;
 
        if (!tpt_trig->running)
                return;
@@ -349,10 +344,7 @@ static void ieee80211_stop_tpt_led_trig(struct ieee80211_local *local)
        tpt_trig->running = false;
        del_timer_sync(&tpt_trig->timer);
 
-       read_lock(&local->tpt_led.leddev_list_lock);
-       list_for_each_entry(led_cdev, &local->tpt_led.led_cdevs, trig_list)
-               led_set_brightness(led_cdev, LED_OFF);
-       read_unlock(&local->tpt_led.leddev_list_lock);
+       led_trigger_event(&local->tpt_led, LED_OFF);
 }
 
 void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local,
index 62145e5..05f4c3c 100644
@@ -252,18 +252,18 @@ static void ieee80211_restart_work(struct work_struct *work)
        struct ieee80211_local *local =
                container_of(work, struct ieee80211_local, restart_work);
        struct ieee80211_sub_if_data *sdata;
+       int ret;
 
        /* wait for scan work complete */
        flush_workqueue(local->workqueue);
        flush_work(&local->sched_scan_stopped_work);
+       flush_work(&local->radar_detected_work);
+
+       rtnl_lock();
 
        WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
             "%s called with hardware scan in progress\n", __func__);
 
-       flush_work(&local->radar_detected_work);
-       /* we might do interface manipulations, so need both */
-       rtnl_lock();
-       wiphy_lock(local->hw.wiphy);
        list_for_each_entry(sdata, &local->interfaces, list) {
                /*
                 * XXX: there may be more work for other vif types and even
@@ -301,8 +301,12 @@ static void ieee80211_restart_work(struct work_struct *work)
        /* wait for all packet processing to be done */
        synchronize_net();
 
-       ieee80211_reconfig(local);
+       ret = ieee80211_reconfig(local);
        wiphy_unlock(local->hw.wiphy);
+
+       if (ret)
+               cfg80211_shutdown_all_interfaces(local->hw.wiphy);
+
        rtnl_unlock();
 }
 
@@ -701,10 +705,13 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
        spin_lock_init(&local->queue_stop_reason_lock);
 
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
-               INIT_LIST_HEAD(&local->active_txqs[i]);
-               spin_lock_init(&local->active_txq_lock[i]);
-               local->aql_txq_limit_low[i] = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L;
-               local->aql_txq_limit_high[i] =
+               struct airtime_sched_info *air_sched = &local->airtime[i];
+
+               air_sched->active_txqs = RB_ROOT_CACHED;
+               INIT_LIST_HEAD(&air_sched->active_list);
+               spin_lock_init(&air_sched->lock);
+               air_sched->aql_txq_limit_low = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L;
+               air_sched->aql_txq_limit_high =
                        IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H;
        }
 
@@ -734,8 +741,6 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
        INIT_WORK(&local->sched_scan_stopped_work,
                  ieee80211_sched_scan_stopped_work);
 
-       INIT_WORK(&local->tdls_chsw_work, ieee80211_tdls_chsw_work);
-
        spin_lock_init(&local->ack_status_lock);
        idr_init(&local->ack_status_frames);
 
@@ -752,7 +757,6 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
 
        skb_queue_head_init(&local->skb_queue);
        skb_queue_head_init(&local->skb_queue_unreliable);
-       skb_queue_head_init(&local->skb_queue_tdls_chsw);
 
        ieee80211_alloc_led_names(local);
 
@@ -1009,8 +1013,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                supp_ht = supp_ht || sband->ht_cap.ht_supported;
                supp_vht = supp_vht || sband->vht_cap.vht_supported;
 
-               if (!supp_he)
-                       supp_he = !!ieee80211_get_he_sta_cap(sband);
+               for (i = 0; i < sband->n_iftype_data; i++) {
+                       const struct ieee80211_sband_iftype_data *iftd;
+
+                       iftd = &sband->iftype_data[i];
+
+                       supp_he = supp_he || (iftd && iftd->he_cap.has_he);
+               }
 
                /* HT, VHT, HE require QoS, thus >= 4 queues */
                if (WARN_ON(local->hw.queues < IEEE80211_NUM_ACS &&
@@ -1384,7 +1393,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
        cancel_delayed_work_sync(&local->roc_work);
        cancel_work_sync(&local->restart_work);
        cancel_work_sync(&local->reconfig_filter);
-       cancel_work_sync(&local->tdls_chsw_work);
        flush_work(&local->sched_scan_stopped_work);
        flush_work(&local->radar_detected_work);
 
@@ -1396,7 +1404,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
                wiphy_warn(local->hw.wiphy, "skb_queue not empty\n");
        skb_queue_purge(&local->skb_queue);
        skb_queue_purge(&local->skb_queue_unreliable);
-       skb_queue_purge(&local->skb_queue_tdls_chsw);
 
        wiphy_unregister(local->hw.wiphy);
        destroy_workqueue(local->workqueue);
index 40492d1..77080b4 100644
@@ -134,7 +134,7 @@ struct mesh_path {
  * gate's mpath may or may not be resolved and active.
  * @gates_lock: protects updates to known_gates
  * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
- * @walk_head: linked list containging all mesh_path objects
+ * @walk_head: linked list containing all mesh_path objects
  * @walk_lock: lock protecting walk_head
  * @entries: number of entries in the table
  */
index 3db514c..a05b615 100644
@@ -1124,7 +1124,7 @@ enddiscovery:
  * forwarding information is found.
  *
  * Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
- * skb is freeed here if no mpath could be allocated.
+ * skb is freed here if no mpath could be allocated.
  */
 int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
                         struct sk_buff *skb)
index 620ecf9..efbefcb 100644
@@ -122,7 +122,7 @@ static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
                hdr = (struct ieee80211_hdr *) skb->data;
 
                /* we preserve the previous mesh header and only add
-                * the new addreses */
+                * the new addresses */
                mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
                mshdr->flags = MESH_FLAGS_AE_A5_A6;
                memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
index aca26df..a691584 100644
@@ -150,7 +150,7 @@ out:
  * mesh STA in a MBSS. Three HT protection modes are supported for now, non-HT
  * mixed mode, 20MHz-protection and no-protection mode. non-HT mixed mode is
  * selected if any non-HT peers are present in our MBSS.  20MHz-protection mode
- * is selected if all peers in our 20/40MHz MBSS support HT and atleast one
+ * is selected if all peers in our 20/40MHz MBSS support HT and at least one
  * HT20 peer is present. Otherwise no-protection mode is selected.
  */
 static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
index 2480bd0..a00f11a 100644
@@ -8,7 +8,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2020 Intel Corporation
+ * Copyright (C) 2018 - 2021 Intel Corporation
  */
 
 #include <linux/delay.h>
@@ -371,7 +371,6 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
        struct cfg80211_chan_def chandef;
        u16 ht_opmode;
        u32 flags;
-       enum ieee80211_sta_rx_bandwidth new_sta_bw;
        u32 vht_cap_info = 0;
        int ret;
 
@@ -385,7 +384,9 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
 
        /* don't check HE if we associated as non-HE station */
        if (ifmgd->flags & IEEE80211_STA_DISABLE_HE ||
-           !ieee80211_get_he_sta_cap(sband))
+           !ieee80211_get_he_iftype_cap(sband,
+                                        ieee80211_vif_type_p2p(&sdata->vif)))
                he_oper = NULL;
 
        if (WARN_ON_ONCE(!sta))
@@ -445,40 +446,13 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
                                      IEEE80211_STA_DISABLE_160MHZ)) ||
            !cfg80211_chandef_valid(&chandef)) {
                sdata_info(sdata,
-                          "AP %pM changed bandwidth in a way we can't support - disconnect\n",
-                          ifmgd->bssid);
+                          "AP %pM changed caps/bw in a way we can't support (0x%x/0x%x) - disconnect\n",
+                          ifmgd->bssid, flags, ifmgd->flags);
                return -EINVAL;
        }
 
-       switch (chandef.width) {
-       case NL80211_CHAN_WIDTH_20_NOHT:
-       case NL80211_CHAN_WIDTH_20:
-               new_sta_bw = IEEE80211_STA_RX_BW_20;
-               break;
-       case NL80211_CHAN_WIDTH_40:
-               new_sta_bw = IEEE80211_STA_RX_BW_40;
-               break;
-       case NL80211_CHAN_WIDTH_80:
-               new_sta_bw = IEEE80211_STA_RX_BW_80;
-               break;
-       case NL80211_CHAN_WIDTH_80P80:
-       case NL80211_CHAN_WIDTH_160:
-               new_sta_bw = IEEE80211_STA_RX_BW_160;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       if (new_sta_bw > sta->cur_max_bandwidth)
-               new_sta_bw = sta->cur_max_bandwidth;
-
-       if (new_sta_bw < sta->sta.bandwidth) {
-               sta->sta.bandwidth = new_sta_bw;
-               rate_control_rate_update(local, sband, sta,
-                                        IEEE80211_RC_BW_CHANGED);
-       }
-
        ret = ieee80211_vif_change_bandwidth(sdata, &chandef, changed);
        if (ret) {
                sdata_info(sdata,
                           "AP %pM changed bandwidth to incompatible one - disconnect\n",
@@ -486,12 +460,6 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
                return ret;
        }
 
-       if (new_sta_bw > sta->sta.bandwidth) {
-               sta->sta.bandwidth = new_sta_bw;
-               rate_control_rate_update(local, sband, sta,
-                                        IEEE80211_RC_BW_CHANGED);
-       }
-
        return 0;
 }
 
@@ -617,7 +585,7 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
                cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
 
        /*
-        * If some other vif is using the MU-MIMO capablity we cannot associate
+        * If some other vif is using the MU-MIMO capability we cannot associate
         * using MU-MIMO - this will lead to contradictions in the group-id
         * mechanism.
         * Ownership is defined since association request, in order to avoid
@@ -676,7 +644,8 @@ static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata,
 
        rcu_read_unlock();
 
-       he_cap = ieee80211_get_he_sta_cap(sband);
+       he_cap = ieee80211_get_he_iftype_cap(sband,
+                                            ieee80211_vif_type_p2p(&sdata->vif));
        if (!he_cap || !reg_cap)
                return;
 
@@ -712,6 +681,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
        u32 rates = 0;
        __le16 listen_int;
        struct element *ext_capa = NULL;
+       enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
+       const struct ieee80211_sband_iftype_data *iftd;
+       struct ieee80211_prep_tx_info info = {};
 
        /* we know it's writable, cast away the const */
        if (assoc_data->ie_len)
@@ -756,6 +728,8 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
                }
        }
 
+       iftd = ieee80211_get_sband_iftype_data(sband, iftype);
+
        skb = alloc_skb(local->hw.extra_tx_headroom +
                        sizeof(*mgmt) + /* bit too much but doesn't matter */
                        2 + assoc_data->ssid_len + /* SSID */
@@ -770,7 +744,8 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
                        2 + 1 + sizeof(struct ieee80211_he_6ghz_capa) +
                        assoc_data->ie_len + /* extra IEs */
                        (assoc_data->fils_kek_len ? 16 /* AES-SIV */ : 0) +
-                       9, /* WMM */
+                       9 + /* WMM */
+                       (iftd ? iftd->vendor_elems.len : 0),
                        GFP_KERNEL);
        if (!skb)
                return;
@@ -810,12 +785,14 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
                mgmt->u.reassoc_req.listen_interval = listen_int;
                memcpy(mgmt->u.reassoc_req.current_ap, assoc_data->prev_bssid,
                       ETH_ALEN);
+               info.subtype = IEEE80211_STYPE_REASSOC_REQ;
        } else {
                skb_put(skb, 4);
                mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                                  IEEE80211_STYPE_ASSOC_REQ);
                mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
                mgmt->u.assoc_req.listen_interval = listen_int;
+               info.subtype = IEEE80211_STYPE_ASSOC_REQ;
        }
 
        /* SSID */
@@ -1043,6 +1020,9 @@ skip_rates:
                ieee80211_add_s1g_capab_ie(sdata, &sband->s1g_cap, skb);
        }
 
+       if (iftd && iftd->vendor_elems.data && iftd->vendor_elems.len)
+               skb_put_data(skb, iftd->vendor_elems.data, iftd->vendor_elems.len);
+
        /* add any remaining custom (i.e. vendor specific here) IEs */
        if (assoc_data->ie_len) {
                noffset = assoc_data->ie_len;
@@ -1060,7 +1040,7 @@ skip_rates:
        ifmgd->assoc_req_ies = kmemdup(ie_start, pos - ie_start, GFP_ATOMIC);
        ifmgd->assoc_req_ies_len = pos - ie_start;
 
-       drv_mgd_prepare_tx(local, sdata, 0);
+       drv_mgd_prepare_tx(local, sdata, &info);
 
        IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
        if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
@@ -1094,11 +1074,6 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
        struct ieee80211_hdr_3addr *nullfunc;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
-       /* Don't send NDPs when STA is connected HE */
-       if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-           !(ifmgd->flags & IEEE80211_STA_DISABLE_HE))
-               return;
-
        skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif,
                !ieee80211_hw_check(&local->hw, DOESNT_SUPPORT_QOS_NDP));
        if (!skb)
@@ -1130,10 +1105,6 @@ static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
        if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
                return;
 
-       /* Don't send NDPs when connected HE */
-       if (!(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE))
-               return;
-
        skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30);
        if (!skb)
                return;
@@ -1183,10 +1154,6 @@ static void ieee80211_chswitch_work(struct work_struct *work)
         */
 
        if (sdata->reserved_chanctx) {
-               struct ieee80211_supported_band *sband = NULL;
-               struct sta_info *mgd_sta = NULL;
-               enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20;
-
                /*
                 * with multi-vif csa driver may call ieee80211_csa_finish()
                 * many times while waiting for other interfaces to use their
@@ -1195,48 +1162,6 @@ static void ieee80211_chswitch_work(struct work_struct *work)
                if (sdata->reserved_ready)
                        goto out;
 
-               if (sdata->vif.bss_conf.chandef.width !=
-                   sdata->csa_chandef.width) {
-                       /*
-                        * For managed interface, we need to also update the AP
-                        * station bandwidth and align the rate scale algorithm
-                        * on the bandwidth change. Here we only consider the
-                        * bandwidth of the new channel definition (as channel
-                        * switch flow does not have the full HT/VHT/HE
-                        * information), assuming that if additional changes are
-                        * required they would be done as part of the processing
-                        * of the next beacon from the AP.
-                        */
-                       switch (sdata->csa_chandef.width) {
-                       case NL80211_CHAN_WIDTH_20_NOHT:
-                       case NL80211_CHAN_WIDTH_20:
-                       default:
-                               bw = IEEE80211_STA_RX_BW_20;
-                               break;
-                       case NL80211_CHAN_WIDTH_40:
-                               bw = IEEE80211_STA_RX_BW_40;
-                               break;
-                       case NL80211_CHAN_WIDTH_80:
-                               bw = IEEE80211_STA_RX_BW_80;
-                               break;
-                       case NL80211_CHAN_WIDTH_80P80:
-                       case NL80211_CHAN_WIDTH_160:
-                               bw = IEEE80211_STA_RX_BW_160;
-                               break;
-                       }
-
-                       mgd_sta = sta_info_get(sdata, ifmgd->bssid);
-                       sband =
-                               local->hw.wiphy->bands[sdata->csa_chandef.chan->band];
-               }
-
-               if (sdata->vif.bss_conf.chandef.width >
-                   sdata->csa_chandef.width) {
-                       mgd_sta->sta.bandwidth = bw;
-                       rate_control_rate_update(local, sband, mgd_sta,
-                                                IEEE80211_RC_BW_CHANGED);
-               }
-
                ret = ieee80211_vif_use_reserved_context(sdata);
                if (ret) {
                        sdata_info(sdata,
@@ -1247,13 +1172,6 @@ static void ieee80211_chswitch_work(struct work_struct *work)
                        goto out;
                }
 
-               if (sdata->vif.bss_conf.chandef.width <
-                   sdata->csa_chandef.width) {
-                       mgd_sta->sta.bandwidth = bw;
-                       rate_control_rate_update(local, sband, mgd_sta,
-                                                IEEE80211_RC_BW_CHANGED);
-               }
-
                goto out;
        }
 
@@ -2341,6 +2259,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct ieee80211_local *local = sdata->local;
        u32 changed = 0;
+       struct ieee80211_prep_tx_info info = {
+               .subtype = stype,
+       };
 
        sdata_assert_lock(sdata);
 
@@ -2390,8 +2311,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
                 * driver requested so.
                 */
                if (ieee80211_hw_check(&local->hw, DEAUTH_NEED_MGD_TX_PREP) &&
-                   !ifmgd->have_beacon)
-                       drv_mgd_prepare_tx(sdata->local, sdata, 0);
+                   !ifmgd->have_beacon) {
+                       drv_mgd_prepare_tx(sdata->local, sdata, &info);
+               }
 
                ieee80211_send_deauth_disassoc(sdata, ifmgd->bssid,
                                               ifmgd->bssid, stype, reason,
@@ -2402,6 +2324,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        if (tx)
                ieee80211_flush_queues(local, sdata, false);
 
+       drv_mgd_complete_tx(sdata->local, sdata, &info);
+
        /* clear bssid only after building the needed mgmt frames */
        eth_zero_addr(ifmgd->bssid);
 
@@ -2617,10 +2541,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
 
        if (ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) {
                ifmgd->nullfunc_failed = false;
-               if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE))
-                       ifmgd->probe_send_count--;
-               else
-                       ieee80211_send_nullfunc(sdata->local, sdata, false);
+               ieee80211_send_nullfunc(sdata->local, sdata, false);
        } else {
                int ssid_len;
 
@@ -2952,6 +2873,9 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
        u8 *pos;
        struct ieee802_11_elems elems;
        u32 tx_flags = 0;
+       struct ieee80211_prep_tx_info info = {
+               .subtype = IEEE80211_STYPE_AUTH,
+       };
 
        pos = mgmt->u.auth.variable;
        ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems,
@@ -2959,7 +2883,7 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
        if (!elems.challenge)
                return;
        auth_data->expected_transaction = 4;
-       drv_mgd_prepare_tx(sdata->local, sdata, 0);
+       drv_mgd_prepare_tx(sdata->local, sdata, &info);
        if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
                tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
                           IEEE80211_TX_INTFL_MLME_CONN_TX;
@@ -3012,6 +2936,9 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
                .type = MLME_EVENT,
                .u.mlme.data = AUTH_EVENT,
        };
+       struct ieee80211_prep_tx_info info = {
+               .subtype = IEEE80211_STYPE_AUTH,
+       };
 
        sdata_assert_lock(sdata);
 
@@ -3040,7 +2967,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
                           mgmt->sa, auth_alg, ifmgd->auth_data->algorithm,
                           auth_transaction,
                           ifmgd->auth_data->expected_transaction);
-               return;
+               goto notify_driver;
        }
 
        if (status_code != WLAN_STATUS_SUCCESS) {
@@ -3051,7 +2978,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
                     (auth_transaction == 1 &&
                      (status_code == WLAN_STATUS_SAE_HASH_TO_ELEMENT ||
                       status_code == WLAN_STATUS_SAE_PK))))
-                       return;
+                       goto notify_driver;
 
                sdata_info(sdata, "%pM denied authentication (status %d)\n",
                           mgmt->sa, status_code);
@@ -3059,7 +2986,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
                event.u.mlme.status = MLME_DENIED;
                event.u.mlme.reason = status_code;
                drv_event_callback(sdata->local, sdata, &event);
-               return;
+               goto notify_driver;
        }
 
        switch (ifmgd->auth_data->algorithm) {
@@ -3081,10 +3008,11 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
        default:
                WARN_ONCE(1, "invalid auth alg %d",
                          ifmgd->auth_data->algorithm);
-               return;
+               goto notify_driver;
        }
 
        event.u.mlme.status = MLME_SUCCESS;
+       info.success = 1;
        drv_event_callback(sdata->local, sdata, &event);
        if (ifmgd->auth_data->algorithm != WLAN_AUTH_SAE ||
            (auth_transaction == 2 &&
@@ -3098,6 +3026,8 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
        }
 
        cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
+notify_driver:
+       drv_mgd_complete_tx(sdata->local, sdata, &info);
 }
 
 #define case_WLAN(type) \
@@ -3314,6 +3244,23 @@ static int ieee80211_recalc_twt_req(struct ieee80211_sub_if_data *sdata,
        return 0;
 }
 
+static bool ieee80211_twt_bcast_support(struct ieee80211_sub_if_data *sdata,
+                                       struct ieee80211_bss_conf *bss_conf,
+                                       struct ieee80211_supported_band *sband,
+                                       struct sta_info *sta)
+{
+       const struct ieee80211_sta_he_cap *own_he_cap =
+               ieee80211_get_he_iftype_cap(sband,
+                                           ieee80211_vif_type_p2p(&sdata->vif));
+
+       return bss_conf->he_support &&
+               (sta->sta.he_cap.he_cap_elem.mac_cap_info[2] &
+                       IEEE80211_HE_MAC_CAP2_BCAST_TWT) &&
+               own_he_cap &&
+               (own_he_cap->he_cap_elem.mac_cap_info[2] &
+                       IEEE80211_HE_MAC_CAP2_BCAST_TWT);
+}
+
 static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
                                    struct cfg80211_bss *cbss,
                                    struct ieee80211_mgmt *mgmt, size_t len,
@@ -3529,6 +3476,9 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
                bss_conf->twt_protected = false;
        }
 
+       bss_conf->twt_broadcast =
+               ieee80211_twt_bcast_support(sdata, bss_conf, sband, sta);
+
        if (bss_conf->he_support) {
                bss_conf->he_bss_color.color =
                        le32_get_bits(elems->he_operation->he_oper_params,
@@ -3699,6 +3649,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
                .type = MLME_EVENT,
                .u.mlme.data = ASSOC_EVENT,
        };
+       struct ieee80211_prep_tx_info info = {};
 
        sdata_assert_lock(sdata);
 
@@ -3728,6 +3679,15 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
                aid = 0; /* TODO */
        }
 
+       /*
+        * Note: this may not be perfect, the AP might misbehave - if
+        * anyone needs to rely on a complete notification with
+        * exactly the right subtype, then we need to track what we
+        * actually transmitted.
+        */
+       info.subtype = reassoc ? IEEE80211_STYPE_REASSOC_REQ :
+                                IEEE80211_STYPE_ASSOC_REQ;
+
        sdata_info(sdata,
                   "RX %sssocResp from %pM (capab=0x%x status=%d aid=%d)\n",
                   reassoc ? "Rea" : "A", mgmt->sa,
@@ -3753,7 +3713,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
                assoc_data->timeout_started = true;
                if (ms > IEEE80211_ASSOC_TIMEOUT)
                        run_again(sdata, assoc_data->timeout);
-               return;
+               goto notify_driver;
        }
 
        if (status_code != WLAN_STATUS_SUCCESS) {
@@ -3768,7 +3728,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
                        /* oops -- internal error -- send timeout for now */
                        ieee80211_destroy_assoc_data(sdata, false, false);
                        cfg80211_assoc_timeout(sdata->dev, cbss);
-                       return;
+                       goto notify_driver;
                }
                event.u.mlme.status = MLME_SUCCESS;
                drv_event_callback(sdata->local, sdata, &event);
@@ -3786,10 +3746,14 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
                for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
                        if (sdata->tx_conf[ac].uapsd)
                                uapsd_queues |= ieee80211_ac_to_qos_mask[ac];
+
+               info.success = 1;
        }
 
        cfg80211_rx_assoc_resp(sdata->dev, cbss, (u8 *)mgmt, len, uapsd_queues,
                               ifmgd->assoc_req_ies, ifmgd->assoc_req_ies_len);
+notify_driver:
+       drv_mgd_complete_tx(sdata->local, sdata, &info);
 }
 
 static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
@@ -4062,10 +4026,14 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                if (elems.mbssid_config_ie)
                        bss_conf->profile_periodicity =
                                elems.mbssid_config_ie->profile_periodicity;
+               else
+                       bss_conf->profile_periodicity = 0;
 
                if (elems.ext_capab_len >= 11 &&
                    (elems.ext_capab[10] & WLAN_EXT_CAPA11_EMA_SUPPORT))
                        bss_conf->ema_ap = true;
+               else
+                       bss_conf->ema_ap = false;
 
                /* continue assoc process */
                ifmgd->assoc_data->timeout = jiffies;
@@ -4404,7 +4372,9 @@ static int ieee80211_auth(struct ieee80211_sub_if_data *sdata)
        u32 tx_flags = 0;
        u16 trans = 1;
        u16 status = 0;
-       u16 prepare_tx_duration = 0;
+       struct ieee80211_prep_tx_info info = {
+               .subtype = IEEE80211_STYPE_AUTH,
+       };
 
        sdata_assert_lock(sdata);
 
@@ -4427,10 +4397,9 @@ static int ieee80211_auth(struct ieee80211_sub_if_data *sdata)
        }
 
        if (auth_data->algorithm == WLAN_AUTH_SAE)
-               prepare_tx_duration =
-                       jiffies_to_msecs(IEEE80211_AUTH_TIMEOUT_SAE);
+               info.duration = jiffies_to_msecs(IEEE80211_AUTH_TIMEOUT_SAE);
 
-       drv_mgd_prepare_tx(local, sdata, prepare_tx_duration);
+       drv_mgd_prepare_tx(local, sdata, &info);
 
        sdata_info(sdata, "send auth to %pM (try %d/%d)\n",
                   auth_data->bss->bssid, auth_data->tries,
@@ -4925,11 +4894,13 @@ static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
 }
 
 static bool
-ieee80211_verify_sta_he_mcs_support(struct ieee80211_supported_band *sband,
+ieee80211_verify_sta_he_mcs_support(struct ieee80211_sub_if_data *sdata,
+                                   struct ieee80211_supported_band *sband,
                                    const struct ieee80211_he_operation *he_op)
 {
        const struct ieee80211_sta_he_cap *sta_he_cap =
-               ieee80211_get_he_sta_cap(sband);
+               ieee80211_get_he_iftype_cap(sband,
+                                           ieee80211_vif_type_p2p(&sdata->vif));
        u16 ap_min_req_set;
        int i;
 
@@ -5023,7 +4994,8 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
                ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
        }
 
-       if (!ieee80211_get_he_sta_cap(sband))
+       if (!ieee80211_get_he_iftype_cap(sband,
+                                        ieee80211_vif_type_p2p(&sdata->vif)))
                ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
 
        rcu_read_lock();
@@ -5081,7 +5053,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
                else
                        he_oper = NULL;
 
-               if (!ieee80211_verify_sta_he_mcs_support(sband, he_oper))
+               if (!ieee80211_verify_sta_he_mcs_support(sdata, sband, he_oper))
                        ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
        }
 
@@ -5651,15 +5623,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                       2 * FILS_NONCE_LEN);
 
        assoc_data->bss = req->bss;
-
-       if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) {
-               if (ifmgd->powersave)
-                       sdata->smps_mode = IEEE80211_SMPS_DYNAMIC;
-               else
-                       sdata->smps_mode = IEEE80211_SMPS_OFF;
-       } else
-               sdata->smps_mode = ifmgd->req_smps;
-
        assoc_data->capability = req->bss->capability;
        assoc_data->supp_rates = bss->supp_rates;
        assoc_data->supp_rates_len = bss->supp_rates_len;
@@ -5766,6 +5729,15 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        if (err)
                goto err_clear;
 
+       if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) {
+               if (ifmgd->powersave)
+                       sdata->smps_mode = IEEE80211_SMPS_DYNAMIC;
+               else
+                       sdata->smps_mode = IEEE80211_SMPS_OFF;
+       } else {
+               sdata->smps_mode = ifmgd->req_smps;
+       }
+
        rcu_read_lock();
        beacon_ies = rcu_dereference(req->bss->beacon_ies);
 
@@ -5802,12 +5774,16 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                                              beacon_ies->data, beacon_ies->len);
                if (elem && elem->datalen >= 3)
                        sdata->vif.bss_conf.profile_periodicity = elem->data[2];
+               else
+                       sdata->vif.bss_conf.profile_periodicity = 0;
 
                elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY,
                                          beacon_ies->data, beacon_ies->len);
                if (elem && elem->datalen >= 11 &&
                    (elem->data[10] & WLAN_EXT_CAPA11_EMA_SUPPORT))
                        sdata->vif.bss_conf.ema_ap = true;
+               else
+                       sdata->vif.bss_conf.ema_ap = false;
        } else {
                assoc_data->timeout = jiffies;
                assoc_data->timeout_started = true;
@@ -5846,6 +5822,9 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
        bool tx = !req->local_state_change;
+       struct ieee80211_prep_tx_info info = {
+               .subtype = IEEE80211_STYPE_DEAUTH,
+       };
 
        if (ifmgd->auth_data &&
            ether_addr_equal(ifmgd->auth_data->bss->bssid, req->bssid)) {
@@ -5854,7 +5833,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
                           req->bssid, req->reason_code,
                           ieee80211_get_reason_code_string(req->reason_code));
 
-               drv_mgd_prepare_tx(sdata->local, sdata, 0);
+               drv_mgd_prepare_tx(sdata->local, sdata, &info);
                ieee80211_send_deauth_disassoc(sdata, req->bssid, req->bssid,
                                               IEEE80211_STYPE_DEAUTH,
                                               req->reason_code, tx,
@@ -5863,7 +5842,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
                ieee80211_report_disconnect(sdata, frame_buf,
                                            sizeof(frame_buf), true,
                                            req->reason_code, false);
-
+               drv_mgd_complete_tx(sdata->local, sdata, &info);
                return 0;
        }
 
@@ -5874,7 +5853,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
                           req->bssid, req->reason_code,
                           ieee80211_get_reason_code_string(req->reason_code));
 
-               drv_mgd_prepare_tx(sdata->local, sdata, 0);
+               drv_mgd_prepare_tx(sdata->local, sdata, &info);
                ieee80211_send_deauth_disassoc(sdata, req->bssid, req->bssid,
                                               IEEE80211_STYPE_DEAUTH,
                                               req->reason_code, tx,
@@ -5898,6 +5877,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
                ieee80211_report_disconnect(sdata, frame_buf,
                                            sizeof(frame_buf), true,
                                            req->reason_code, false);
+               drv_mgd_complete_tx(sdata->local, sdata, &info);
                return 0;
        }
 
index 63652c3..e5935e3 100644
@@ -297,15 +297,11 @@ void ieee80211_check_rate_mask(struct ieee80211_sub_if_data *sdata)
 static bool rc_no_data_or_no_ack_use_min(struct ieee80211_tx_rate_control *txrc)
 {
        struct sk_buff *skb = txrc->skb;
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       __le16 fc;
-
-       fc = hdr->frame_control;
 
        return (info->flags & (IEEE80211_TX_CTL_NO_ACK |
                               IEEE80211_TX_CTL_USE_MINRATE)) ||
-               !ieee80211_is_data(fc);
+               !ieee80211_is_tx_data(skb);
 }
 
 static void rc_send_low_basicrate(struct ieee80211_tx_rate *rate,
@@ -396,6 +392,10 @@ static bool rate_control_send_low(struct ieee80211_sta *pubsta,
        int mcast_rate;
        bool use_basicrate = false;
 
+       if (ieee80211_is_tx_data(txrc->skb) &&
+           info->flags & IEEE80211_TX_CTL_NO_ACK)
+               return false;
+
        if (!pubsta || rc_no_data_or_no_ack_use_min(txrc)) {
                __rate_control_send_low(txrc->hw, sband, pubsta, info,
                                        txrc->rate_idx_mask);
@@ -870,7 +870,6 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
                            int max_rates)
 {
        struct ieee80211_sub_if_data *sdata;
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_supported_band *sband;
 
@@ -882,7 +881,7 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
        sdata = vif_to_sdata(vif);
        sband = sdata->local->hw.wiphy->bands[info->band];
 
-       if (ieee80211_is_data(hdr->frame_control))
+       if (ieee80211_is_tx_data(skb))
                rate_control_apply_mask(sdata, sta, sband, dest, max_rates);
 
        if (dest[0].idx < 0)
index 6487b05..72b44d4 100644
@@ -434,7 +434,7 @@ minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
        unsigned int nsecs = 0, overhead = mi->overhead;
        unsigned int ampdu_len = 1;
 
-       /* do not account throughput if sucess prob is below 10% */
+       /* do not account throughput if success prob is below 10% */
        if (prob_avg < MINSTREL_FRAC(10, 100))
                return 0;
 
@@ -1176,29 +1176,6 @@ minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary)
 }
 
 static void
-minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
-{
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-       struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
-       u16 tid;
-
-       if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
-               return;
-
-       if (unlikely(!ieee80211_is_data_qos(hdr->frame_control)))
-               return;
-
-       if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
-               return;
-
-       tid = ieee80211_get_tid(hdr);
-       if (likely(sta->ampdu_mlme.tid_tx[tid]))
-               return;
-
-       ieee80211_start_tx_ba_session(pubsta, tid, 0);
-}
-
-static void
 minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
                       void *priv_sta, struct ieee80211_tx_status *st)
 {
@@ -1211,6 +1188,10 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
        bool last, update = false;
        int i;
 
+       /* Ignore packets that were sent with the noAck flag */
+       if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+               return;
+
        /* This packet was aggregated but doesn't carry status info */
        if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
            !(info->flags & IEEE80211_TX_STAT_AMPDU))
@@ -1498,10 +1479,6 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
        struct minstrel_priv *mp = priv;
        u16 sample_idx;
 
-       if (!(info->flags & IEEE80211_TX_CTL_AMPDU) &&
-           !minstrel_ht_is_legacy_group(MI_RATE_GROUP(mi->max_prob_rate)))
-               minstrel_aggr_check(sta, txrc->skb);
-
        info->flags |= mi->tx_flags;
 
 #ifdef CONFIG_MAC80211_DEBUGFS
@@ -1514,7 +1491,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
            (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
                return;
 
-       if (time_is_before_jiffies(mi->sample_time))
+       if (time_is_after_jiffies(mi->sample_time))
                return;
 
        mi->sample_time = jiffies + MINSTREL_SAMPLE_INTERVAL;
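The sample_time comparison above was inverted: time_is_after_jiffies(t) is true while t is still in the future, so the function now correctly bails out until the sample interval has elapsed, and only then rearms it. The kernel's wrap-safe time comparison reduces to a signed subtraction, as this sketch shows:

    #include <assert.h>

    typedef unsigned long ulong;

    /* wrap-safe: true if a is later than b, like the kernel's time_after() */
    #define time_after(a, b)  ((long)((b) - (a)) < 0)
    /* time_is_after_jiffies(t) is time_after(t, jiffies): t still in the future */

    int main(void)
    {
        ulong jiffies = (ulong)-5;          /* counter about to wrap */
        ulong sample_time = jiffies + 10;   /* wraps past zero */

        assert(time_after(sample_time, jiffies));   /* in the future: skip sampling */
        jiffies += 20;
        assert(!time_after(sample_time, jiffies));  /* elapsed: sample and rearm */
        return 0;
    }
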
@@ -1907,6 +1884,7 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
 
 static const struct rate_control_ops mac80211_minstrel_ht = {
        .name = "minstrel_ht",
+       .capa = RATE_CTRL_CAPA_AMPDU_TRIGGER,
        .tx_status_ext = minstrel_ht_tx_status,
        .get_rate = minstrel_ht_get_rate,
        .rate_init = minstrel_ht_rate_init,
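With the minstrel_aggr_check() removal above, minstrel_ht no longer kicks off A-MPDU sessions from its get_rate() hot path; instead it advertises RATE_CTRL_CAPA_AMPDU_TRIGGER and mac80211 triggers sessions from the TX path (see ieee80211_aggr_check() in the tx.c hunks below). A sketch of the capability-gated dispatch, with hypothetical simplified ops:

    #include <stdio.h>

    #define RC_CAPA_AMPDU_TRIGGER  (1u << 0)  /* rate control wants mac80211 to trigger A-MPDU */

    struct rc_ops {
        const char *name;
        unsigned int capa;
    };

    /* mac80211 side: only trigger sessions if the rate-control module opted in */
    static void aggr_check(const struct rc_ops *ops, int tid)
    {
        if (!ops || !(ops->capa & RC_CAPA_AMPDU_TRIGGER))
            return;
        printf("%s: starting TX BA session on tid %d\n", ops->name, tid);
    }

    int main(void)
    {
        struct rc_ops minstrel_ht = { "minstrel_ht", RC_CAPA_AMPDU_TRIGGER };
        struct rc_ops legacy = { "legacy_rc", 0 };

        aggr_check(&minstrel_ht, 0);  /* triggers */
        aggr_check(&legacy, 0);       /* no-op: module handles aggregation itself */
        return 0;
    }
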
index 1bb43ed..771921c 100644 (file)
@@ -214,6 +214,24 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
        return len;
 }
 
+static void __ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata,
+                                          struct sta_info *sta,
+                                          struct sk_buff *skb)
+{
+       skb_queue_tail(&sdata->skb_queue, skb);
+       ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+       if (sta)
+               sta->rx_stats.packets++;
+}
+
+static void ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata,
+                                        struct sta_info *sta,
+                                        struct sk_buff *skb)
+{
+       skb->protocol = 0;
+       __ieee80211_queue_skb_to_iface(sdata, sta, skb);
+}
+
 static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
                                         struct sk_buff *skb,
                                         int rtap_space)
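The two new helpers centralize the repeated "queue to the interface work and bump rx_stats" pattern removed from the call sites below. Note that ieee80211_queue_skb_to_iface() zeroes skb->protocol so the field can double as an in-band dispatch tag: the TDLS hunk further down requeues channel-switch frames through the __ variant with skb->protocol set to ETH_P_TDLS, letting the iface work handler tell them apart. A sketch of that tagging pattern:

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_P_TDLS 0x890d  /* TDLS payload type, as in if_ether.h */

    struct fake_skb {
        uint16_t protocol;  /* reused as a dispatch tag while on the iface queue */
    };

    /* iface work handler: dispatch on the tag the RX path left behind */
    static void iface_work_rx(struct fake_skb *skb)
    {
        if (skb->protocol == ETH_P_TDLS)
            printf("process TDLS channel switch\n");
        else
            printf("process generic queued frame\n");
    }

    int main(void)
    {
        struct fake_skb mgmt = { .protocol = 0 };          /* tag-clearing helper */
        struct fake_skb tdls = { .protocol = ETH_P_TDLS }; /* queued via the __ variant */

        iface_work_rx(&mgmt);
        iface_work_rx(&tdls);
        return 0;
    }
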
@@ -254,8 +272,7 @@ static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
        if (!skb)
                return;
 
-       skb_queue_tail(&sdata->skb_queue, skb);
-       ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+       ieee80211_queue_skb_to_iface(sdata, NULL, skb);
 }
 
 /*
@@ -1339,7 +1356,6 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
                                       struct sk_buff_head *frames)
 {
        struct sk_buff *skb = rx->skb;
-       struct ieee80211_local *local = rx->local;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct sta_info *sta = rx->sta;
        struct tid_ampdu_rx *tid_agg_rx;
@@ -1391,8 +1407,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
        /* if this mpdu is fragmented - terminate rx aggregation session */
        sc = le16_to_cpu(hdr->seq_ctrl);
        if (sc & IEEE80211_SCTL_FRAG) {
-               skb_queue_tail(&rx->sdata->skb_queue, skb);
-               ieee80211_queue_work(&local->hw, &rx->sdata->work);
+               ieee80211_queue_skb_to_iface(rx->sdata, NULL, skb);
                return;
        }
 
@@ -1563,12 +1578,8 @@ static void sta_ps_start(struct sta_info *sta)
 
        for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
                struct ieee80211_txq *txq = sta->sta.txq[tid];
-               struct txq_info *txqi = to_txq_info(txq);
 
-               spin_lock(&local->active_txq_lock[txq->ac]);
-               if (!list_empty(&txqi->schedule_order))
-                       list_del_init(&txqi->schedule_order);
-               spin_unlock(&local->active_txq_lock[txq->ac]);
+               ieee80211_unschedule_txq(&local->hw, txq, false);
 
                if (txq_has_queue(txq))
                        set_bit(tid, &sta->txq_buffered_tids);
@@ -2240,17 +2251,15 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
        sc = le16_to_cpu(hdr->seq_ctrl);
        frag = sc & IEEE80211_SCTL_FRAG;
 
-       if (is_multicast_ether_addr(hdr->addr1)) {
-               I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
-               goto out_no_led;
-       }
-
        if (rx->sta)
                cache = &rx->sta->frags;
 
        if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
                goto out;
 
+       if (is_multicast_ether_addr(hdr->addr1))
+               return RX_DROP_MONITOR;
+
        I802_DEBUG_INC(rx->local->rx_handlers_fragments);
 
        if (skb_linearize(rx->skb))
@@ -2376,7 +2385,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
 
  out:
        ieee80211_led_rx(rx->local);
- out_no_led:
        if (rx->sta)
                rx->sta->rx_stats.packets++;
        return RX_CONTINUE;
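The defragment hunk above now drops fragmented multicast frames outright (RX_DROP_MONITOR) instead of counting and accepting them: group-addressed frames are never legitimately fragmented, and accepting them leaves room for fragment-injection tricks. The fragment number lives in the low four bits of seq_ctrl, as this check illustrates:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SCTL_FRAG 0x000f  /* fragment number: low 4 bits of seq_ctrl */

    /* an unfragmented frame has frag == 0 and no "more fragments" bit */
    static bool is_fragment(uint16_t seq_ctrl, bool has_morefrags)
    {
        return (seq_ctrl & SCTL_FRAG) != 0 || has_morefrags;
    }

    int main(void)
    {
        assert(!is_fragment(0x0120, false));  /* seq 18, frag 0, complete frame */
        assert(is_fragment(0x0121, false));   /* fragment 1 of a fragmented frame */
        assert(is_fragment(0x0120, true));    /* frag 0 but more fragments follow */
        return 0;
    }
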
@@ -3012,11 +3020,8 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
                    tf->category == WLAN_CATEGORY_TDLS &&
                    (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
                     tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
-                       skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
-                       schedule_work(&local->tdls_chsw_work);
-                       if (rx->sta)
-                               rx->sta->rx_stats.packets++;
-
+                       rx->skb->protocol = cpu_to_be16(ETH_P_TDLS);
+                       __ieee80211_queue_skb_to_iface(sdata, rx->sta, rx->skb);
                        return RX_QUEUED;
                }
        }
@@ -3496,10 +3501,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
        return RX_QUEUED;
 
  queue:
-       skb_queue_tail(&sdata->skb_queue, rx->skb);
-       ieee80211_queue_work(&local->hw, &sdata->work);
-       if (rx->sta)
-               rx->sta->rx_stats.packets++;
+       ieee80211_queue_skb_to_iface(sdata, rx->sta, rx->skb);
        return RX_QUEUED;
 }
 
@@ -3647,10 +3649,7 @@ ieee80211_rx_h_ext(struct ieee80211_rx_data *rx)
                return RX_DROP_MONITOR;
 
        /* for now only beacons are ext, so queue them */
-       skb_queue_tail(&sdata->skb_queue, rx->skb);
-       ieee80211_queue_work(&rx->local->hw, &sdata->work);
-       if (rx->sta)
-               rx->sta->rx_stats.packets++;
+       ieee80211_queue_skb_to_iface(sdata, rx->sta, rx->skb);
 
        return RX_QUEUED;
 }
@@ -3707,11 +3706,7 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
                return RX_DROP_MONITOR;
        }
 
-       /* queue up frame and kick off work to process it */
-       skb_queue_tail(&sdata->skb_queue, rx->skb);
-       ieee80211_queue_work(&rx->local->hw, &sdata->work);
-       if (rx->sta)
-               rx->sta->rx_stats.packets++;
+       ieee80211_queue_skb_to_iface(sdata, rx->sta, rx->skb);
 
        return RX_QUEUED;
 }
index d4cc9ac..6b50cb5 100644 (file)
@@ -251,13 +251,24 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
        struct ieee80211_mgmt *mgmt = (void *)skb->data;
        struct ieee80211_bss *bss;
        struct ieee80211_channel *channel;
+       size_t min_hdr_len = offsetof(struct ieee80211_mgmt,
+                                     u.probe_resp.variable);
+
+       if (!ieee80211_is_probe_resp(mgmt->frame_control) &&
+           !ieee80211_is_beacon(mgmt->frame_control) &&
+           !ieee80211_is_s1g_beacon(mgmt->frame_control))
+               return;
 
        if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
-               if (skb->len < 15)
-                       return;
-       } else if (skb->len < 24 ||
-                (!ieee80211_is_probe_resp(mgmt->frame_control) &&
-                 !ieee80211_is_beacon(mgmt->frame_control)))
+               if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
+                       min_hdr_len = offsetof(struct ieee80211_ext,
+                                              u.s1g_short_beacon.variable);
+               else
+                       min_hdr_len = offsetof(struct ieee80211_ext,
+                                              u.s1g_beacon);
+       }
+
+       if (skb->len < min_hdr_len)
                return;
 
        sdata1 = rcu_dereference(local->scan_sdata);
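Rather than hard-coding 15- and 24-byte minimums, the scan path now rejects unexpected frame types up front and derives the minimum header length from the layout of the frame it is about to parse, via offsetof() up to the variable-length IE part. A sketch of the idea with a hypothetical cut-down frame struct (the real ones are ieee80211_mgmt and ieee80211_ext):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct probe_resp {
        uint8_t  hdr[24];     /* 802.11 management header */
        uint64_t timestamp;
        uint16_t beacon_int;
        uint16_t capab_info;
        uint8_t  variable[];  /* information elements start here */
    };

    int main(void)
    {
        size_t min_hdr_len = offsetof(struct probe_resp, variable);

        /* any frame shorter than this is discarded before IE parsing */
        printf("need at least %zu bytes before parsing IEs\n", min_hdr_len);
        return 0;
    }
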
index f2fb69d..a5505ee 100644 (file)
@@ -425,15 +425,11 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
        if (sta_prepare_rate_control(local, sta, gfp))
                goto free_txq;
 
-       sta->airtime_weight = IEEE80211_DEFAULT_AIRTIME_WEIGHT;
 
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                skb_queue_head_init(&sta->ps_tx_buf[i]);
                skb_queue_head_init(&sta->tx_filtered[i]);
-               sta->airtime[i].deficit = sta->airtime_weight;
-               atomic_set(&sta->airtime[i].aql_tx_pending, 0);
-               sta->airtime[i].aql_limit_low = local->aql_txq_limit_low[i];
-               sta->airtime[i].aql_limit_high = local->aql_txq_limit_high[i];
+               init_airtime_info(&sta->airtime[i], &local->airtime[i]);
        }
 
        for (i = 0; i < IEEE80211_NUM_TIDS; i++)
@@ -1398,11 +1394,6 @@ static void ieee80211_send_null_response(struct sta_info *sta, int tid,
        struct ieee80211_tx_info *info;
        struct ieee80211_chanctx_conf *chanctx_conf;
 
-       /* Don't send NDPs when STA is connected HE */
-       if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-           !(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE))
-               return;
-
        if (qos) {
                fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
                                 IEEE80211_STYPE_QOS_NULLFUNC |
@@ -1897,24 +1888,59 @@ void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
 }
 EXPORT_SYMBOL(ieee80211_sta_set_buffered);
 
-void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
-                                   u32 tx_airtime, u32 rx_airtime)
+void ieee80211_register_airtime(struct ieee80211_txq *txq,
+                               u32 tx_airtime, u32 rx_airtime)
 {
-       struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
-       struct ieee80211_local *local = sta->sdata->local;
-       u8 ac = ieee80211_ac_from_tid(tid);
+       struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
+       struct ieee80211_local *local = sdata->local;
+       u64 weight_sum, weight_sum_reciprocal;
+       struct airtime_sched_info *air_sched;
+       struct airtime_info *air_info;
        u32 airtime = 0;
 
-       if (sta->local->airtime_flags & AIRTIME_USE_TX)
+       air_sched = &local->airtime[txq->ac];
+       air_info = to_airtime_info(txq);
+
+       if (local->airtime_flags & AIRTIME_USE_TX)
                airtime += tx_airtime;
-       if (sta->local->airtime_flags & AIRTIME_USE_RX)
+       if (local->airtime_flags & AIRTIME_USE_RX)
                airtime += rx_airtime;
 
-       spin_lock_bh(&local->active_txq_lock[ac]);
-       sta->airtime[ac].tx_airtime += tx_airtime;
-       sta->airtime[ac].rx_airtime += rx_airtime;
-       sta->airtime[ac].deficit -= airtime;
-       spin_unlock_bh(&local->active_txq_lock[ac]);
+       /* Weights scale so the unit weight is 256 */
+       airtime <<= 8;
+
+       spin_lock_bh(&air_sched->lock);
+
+       air_info->tx_airtime += tx_airtime;
+       air_info->rx_airtime += rx_airtime;
+
+       if (air_sched->weight_sum) {
+               weight_sum = air_sched->weight_sum;
+               weight_sum_reciprocal = air_sched->weight_sum_reciprocal;
+       } else {
+               weight_sum = air_info->weight;
+               weight_sum_reciprocal = air_info->weight_reciprocal;
+       }
+
+       /* Round the calculation of the global v_t */
+       air_sched->v_t += (u64)((airtime + (weight_sum >> 1)) *
+                               weight_sum_reciprocal) >> IEEE80211_RECIPROCAL_SHIFT_64;
+       air_info->v_t += (u32)((airtime + (air_info->weight >> 1)) *
+                              air_info->weight_reciprocal) >> IEEE80211_RECIPROCAL_SHIFT_32;
+       ieee80211_resort_txq(&local->hw, txq);
+
+       spin_unlock_bh(&air_sched->lock);
+}
+
+void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
+                                   u32 tx_airtime, u32 rx_airtime)
+{
+       struct ieee80211_txq *txq = pubsta->txq[tid];
+
+       if (!txq)
+               return;
+
+       ieee80211_register_airtime(txq, tx_airtime, rx_airtime);
 }
 EXPORT_SYMBOL(ieee80211_sta_register_airtime);
 
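The per-station, per-AC deficit is replaced here by a weighted virtual-time clock: each TXQ advances its v_t by airtime divided by its weight, and the scheduler-wide v_t by airtime divided by the weight sum. To keep divisions off the fast path, the reciprocals are precomputed and the divide becomes a multiply-and-shift, with half the divisor added first for rounding, exactly as the two v_t updates above do. A userspace sketch of that fixed-point scheme (the shift width here is an assumption; the kernel uses separate 32- and 64-bit variants):

    #include <stdint.h>
    #include <stdio.h>

    #define RECIPROCAL_SHIFT 32  /* assumed fixed-point width for this sketch */

    static uint64_t reciprocal(uint64_t weight)
    {
        return ((uint64_t)1 << RECIPROCAL_SHIFT) / weight;  /* precomputed once */
    }

    /* rounded division on the fast path: (airtime + w/2) * (2^S / w) >> S */
    static uint64_t div_round(uint64_t airtime, uint64_t weight, uint64_t recip)
    {
        return ((airtime + (weight >> 1)) * recip) >> RECIPROCAL_SHIFT;
    }

    int main(void)
    {
        uint64_t weight = 256;             /* unit weight, scaled by 256 */
        uint64_t recip = reciprocal(weight);
        uint64_t airtime = 1000 << 8;      /* airtime is pre-scaled by 256 too */

        printf("v_t advances by %llu\n",
               (unsigned long long)div_round(airtime, weight, recip));
        return 0;
    }
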
@@ -2093,10 +2119,9 @@ static struct ieee80211_sta_rx_stats *
 sta_get_last_rx_stats(struct sta_info *sta)
 {
        struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
-       struct ieee80211_local *local = sta->local;
        int cpu;
 
-       if (!ieee80211_hw_check(&local->hw, USES_RSS))
+       if (!sta->pcpu_rx_stats)
                return stats;
 
        for_each_possible_cpu(cpu) {
@@ -2196,9 +2221,7 @@ static void sta_set_tidstats(struct sta_info *sta,
        int cpu;
 
        if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
-               if (!ieee80211_hw_check(&local->hw, USES_RSS))
-                       tidstats->rx_msdu +=
-                               sta_get_tidstats_msdu(&sta->rx_stats, tid);
+               tidstats->rx_msdu += sta_get_tidstats_msdu(&sta->rx_stats, tid);
 
                if (sta->pcpu_rx_stats) {
                        for_each_possible_cpu(cpu) {
@@ -2277,7 +2300,6 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
                sinfo->rx_beacon = sdata->u.mgd.count_beacon_signal;
 
        drv_sta_statistics(local, sdata, &sta->sta, sinfo);
-
        sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) |
                         BIT_ULL(NL80211_STA_INFO_STA_FLAGS) |
                         BIT_ULL(NL80211_STA_INFO_BSS_PARAM) |
@@ -2312,8 +2334,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
 
        if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) |
                               BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) {
-               if (!ieee80211_hw_check(&local->hw, USES_RSS))
-                       sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats);
+               sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats);
 
                if (sta->pcpu_rx_stats) {
                        for_each_possible_cpu(cpu) {
@@ -2363,7 +2384,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
        }
 
        if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) {
-               sinfo->airtime_weight = sta->airtime_weight;
+               sinfo->airtime_weight = sta->airtime[0].weight;
                sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT);
        }
 
index 0333072..ba27967 100644 (file)
@@ -135,18 +135,25 @@ enum ieee80211_agg_stop_reason {
 #define AIRTIME_USE_TX         BIT(0)
 #define AIRTIME_USE_RX         BIT(1)
 
+
 struct airtime_info {
        u64 rx_airtime;
        u64 tx_airtime;
-       s64 deficit;
+       u64 v_t;
+       u64 last_scheduled;
+       struct list_head list;
        atomic_t aql_tx_pending; /* Estimated airtime for frames pending */
        u32 aql_limit_low;
        u32 aql_limit_high;
+       u32 weight_reciprocal;
+       u16 weight;
 };
 
 void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
                                          struct sta_info *sta, u8 ac,
                                          u16 tx_airtime, bool tx_completed);
+void ieee80211_register_airtime(struct ieee80211_txq *txq,
+                               u32 tx_airtime, u32 rx_airtime);
 
 struct sta_info;
 
@@ -515,7 +522,6 @@ struct ieee80211_fragment_cache {
  * @tid_seq: per-TID sequence numbers for sending to this STA
  * @airtime: per-AC struct airtime_info describing airtime statistics for this
  *     station
- * @airtime_weight: station weight for airtime fairness calculation purposes
  * @ampdu_mlme: A-MPDU state machine state
  * @mesh: mesh STA information
  * @debugfs_dir: debug filesystem directory dentry
@@ -646,7 +652,6 @@ struct sta_info {
        u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
 
        struct airtime_info airtime[IEEE80211_NUM_ACS];
-       u16 airtime_weight;
 
        /*
         * Aggregation information, locked with lock.
index 9baf185..bae321f 100644 (file)
@@ -970,6 +970,25 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
                if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
                        ieee80211_frame_acked(sta, skb);
 
+       } else if (wiphy_ext_feature_isset(local->hw.wiphy,
+                                          NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) {
+               struct ieee80211_sub_if_data *sdata;
+               struct ieee80211_txq *txq;
+               u32 airtime;
+
+               /* Account airtime to the multicast queue */
+               sdata = ieee80211_sdata_from_skb(local, skb);
+
+               if (sdata && (txq = sdata->vif.txq)) {
+                       airtime = info->status.tx_time ?:
+                               ieee80211_calc_expected_tx_airtime(hw,
+                                                                  &sdata->vif,
+                                                                  NULL,
+                                                                  skb->len,
+                                                                  false);
+
+                       ieee80211_register_airtime(txq, airtime, 0);
+               }
        }
 
        /* SNMP counters
@@ -1006,12 +1025,11 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
            ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) &&
            !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
            local->ps_sdata && !(local->scanning)) {
-               if (info->flags & IEEE80211_TX_STAT_ACK) {
+               if (info->flags & IEEE80211_TX_STAT_ACK)
                        local->ps_sdata->u.mgd.flags |=
                                        IEEE80211_STA_NULLFUNC_ACKED;
-               } else
-                       mod_timer(&local->dynamic_ps_timer, jiffies +
-                                       msecs_to_jiffies(10));
+               mod_timer(&local->dynamic_ps_timer,
+                         jiffies + msecs_to_jiffies(10));
        }
 
        ieee80211_report_used_skb(local, skb, false);
index f91d02b..45e532a 100644 (file)
@@ -1920,7 +1920,7 @@ out:
        return ret;
 }
 
-static void
+void
 ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
                                      struct sk_buff *skb)
 {
@@ -1971,32 +1971,6 @@ void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
        rcu_read_unlock();
 }
 
-void ieee80211_tdls_chsw_work(struct work_struct *wk)
-{
-       struct ieee80211_local *local =
-               container_of(wk, struct ieee80211_local, tdls_chsw_work);
-       struct ieee80211_sub_if_data *sdata;
-       struct sk_buff *skb;
-       struct ieee80211_tdls_data *tf;
-
-       wiphy_lock(local->hw.wiphy);
-       while ((skb = skb_dequeue(&local->skb_queue_tdls_chsw))) {
-               tf = (struct ieee80211_tdls_data *)skb->data;
-               list_for_each_entry(sdata, &local->interfaces, list) {
-                       if (!ieee80211_sdata_running(sdata) ||
-                           sdata->vif.type != NL80211_IFTYPE_STATION ||
-                           !ether_addr_equal(tf->da, sdata->vif.addr))
-                               continue;
-
-                       ieee80211_process_tdls_channel_switch(sdata, skb);
-                       break;
-               }
-
-               kfree_skb(skb);
-       }
-       wiphy_unlock(local->hw.wiphy);
-}
-
 void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
                                      const u8 *peer, u16 reason)
 {
index 8fcc390..f6ef153 100644 (file)
@@ -2,7 +2,7 @@
 /*
 * Portions of this file
 * Copyright(c) 2016-2017 Intel Deutschland GmbH
-* Copyright (C) 2018 - 2020 Intel Corporation
+* Copyright (C) 2018 - 2021 Intel Corporation
 */
 
 #if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
@@ -1461,31 +1461,52 @@ DEFINE_EVENT(release_evt, drv_allow_buffered_frames,
        TP_ARGS(local, sta, tids, num_frames, reason, more_data)
 );
 
-TRACE_EVENT(drv_mgd_prepare_tx,
+DECLARE_EVENT_CLASS(mgd_prepare_complete_tx_evt,
        TP_PROTO(struct ieee80211_local *local,
                 struct ieee80211_sub_if_data *sdata,
-                u16 duration),
+                u16 duration, u16 subtype, bool success),
 
-       TP_ARGS(local, sdata, duration),
+       TP_ARGS(local, sdata, duration, subtype, success),
 
        TP_STRUCT__entry(
                LOCAL_ENTRY
                VIF_ENTRY
                __field(u32, duration)
+               __field(u16, subtype)
+               __field(u8, success)
        ),
 
        TP_fast_assign(
                LOCAL_ASSIGN;
                VIF_ASSIGN;
                __entry->duration = duration;
+               __entry->subtype = subtype;
+               __entry->success = success;
        ),
 
        TP_printk(
-               LOCAL_PR_FMT VIF_PR_FMT " duration: %u",
-               LOCAL_PR_ARG, VIF_PR_ARG, __entry->duration
+               LOCAL_PR_FMT VIF_PR_FMT " duration: %u, subtype:0x%x, success:%d",
+               LOCAL_PR_ARG, VIF_PR_ARG, __entry->duration,
+               __entry->subtype, __entry->success
        )
 );
 
+DEFINE_EVENT(mgd_prepare_complete_tx_evt, drv_mgd_prepare_tx,
+       TP_PROTO(struct ieee80211_local *local,
+                struct ieee80211_sub_if_data *sdata,
+                u16 duration, u16 subtype, bool success),
+
+       TP_ARGS(local, sdata, duration, subtype, success)
+);
+
+DEFINE_EVENT(mgd_prepare_complete_tx_evt, drv_mgd_complete_tx,
+       TP_PROTO(struct ieee80211_local *local,
+                struct ieee80211_sub_if_data *sdata,
+                u16 duration, u16 subtype, bool success),
+
+       TP_ARGS(local, sdata, duration, subtype, success)
+);
+
 DEFINE_EVENT(local_sdata_evt, drv_mgd_protect_tdls_discover,
        TP_PROTO(struct ieee80211_local *local,
                 struct ieee80211_sub_if_data *sdata),
index 0b719f3..e969811 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/bitmap.h>
 #include <linux/rcupdate.h>
 #include <linux/export.h>
+#include <linux/timekeeping.h>
 #include <net/net_namespace.h>
 #include <net/ieee80211_radiotap.h>
 #include <net/cfg80211.h>
@@ -666,6 +667,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
        u32 len;
        struct ieee80211_tx_rate_control txrc;
        struct ieee80211_sta_rates *ratetbl = NULL;
+       bool encap = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
        bool assoc = false;
 
        memset(&txrc, 0, sizeof(txrc));
@@ -707,7 +709,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
         * just wants a probe response.
         */
        if (tx->sdata->vif.bss_conf.use_short_preamble &&
-           (ieee80211_is_data(hdr->frame_control) ||
+           (ieee80211_is_tx_data(tx->skb) ||
             (tx->sta && test_sta_flag(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
                txrc.short_preamble = true;
 
@@ -729,7 +731,8 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
                 "%s: Dropped data frame as no usable bitrate found while "
                 "scanning and associated. Target station: "
                 "%pM on %d GHz band\n",
-                tx->sdata->name, hdr->addr1,
+                tx->sdata->name,
+                encap ? ((struct ethhdr *)hdr)->h_dest : hdr->addr1,
                 info->band ? 5 : 2))
                return TX_DROP;
 
@@ -763,7 +766,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
 
        if (txrc.reported_rate.idx < 0) {
                txrc.reported_rate = tx->rate;
-               if (tx->sta && ieee80211_is_data(hdr->frame_control))
+               if (tx->sta && ieee80211_is_tx_data(tx->skb))
                        tx->sta->tx_stats.last_rate = txrc.reported_rate;
        } else if (tx->sta)
                tx->sta->tx_stats.last_rate = txrc.reported_rate;
@@ -1447,7 +1450,7 @@ void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
        codel_vars_init(&txqi->def_cvars);
        codel_stats_init(&txqi->cstats);
        __skb_queue_head_init(&txqi->frags);
-       INIT_LIST_HEAD(&txqi->schedule_order);
+       RB_CLEAR_NODE(&txqi->schedule_order);
 
        txqi->txq.vif = &sdata->vif;
 
@@ -1491,9 +1494,7 @@ void ieee80211_txq_purge(struct ieee80211_local *local,
        ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
        spin_unlock_bh(&fq->lock);
 
-       spin_lock_bh(&local->active_txq_lock[txqi->txq.ac]);
-       list_del_init(&txqi->schedule_order);
-       spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]);
+       ieee80211_unschedule_txq(&local->hw, &txqi->txq, true);
 }
 
 void ieee80211_txq_set_params(struct ieee80211_local *local)
@@ -1768,8 +1769,6 @@ static int invoke_tx_handlers_early(struct ieee80211_tx_data *tx)
        CALL_TXH(ieee80211_tx_h_ps_buf);
        CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
        CALL_TXH(ieee80211_tx_h_select_key);
-       if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
-               CALL_TXH(ieee80211_tx_h_rate_ctrl);
 
  txh_done:
        if (unlikely(res == TX_DROP)) {
@@ -1802,6 +1801,9 @@ static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx)
                goto txh_done;
        }
 
+       if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
+               CALL_TXH(ieee80211_tx_h_rate_ctrl);
+
        CALL_TXH(ieee80211_tx_h_michael_mic_add);
        CALL_TXH(ieee80211_tx_h_sequence);
        CALL_TXH(ieee80211_tx_h_fragment);
@@ -2014,6 +2016,26 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
        ieee80211_tx(sdata, sta, skb, false);
 }
 
+static bool ieee80211_validate_radiotap_len(struct sk_buff *skb)
+{
+       struct ieee80211_radiotap_header *rthdr =
+               (struct ieee80211_radiotap_header *)skb->data;
+
+       /* check for not even having the fixed radiotap header part */
+       if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
+               return false; /* too short to be possibly valid */
+
+       /* is it a header version we can trust to find length from? */
+       if (unlikely(rthdr->it_version))
+               return false; /* only version 0 is supported */
+
+       /* does the skb contain enough to deliver on the alleged length? */
+       if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
+               return false; /* skb too short for claimed rt header extent */
+
+       return true;
+}
+
 bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
                                 struct net_device *dev)
 {
@@ -2022,8 +2044,6 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
        struct ieee80211_radiotap_header *rthdr =
                (struct ieee80211_radiotap_header *) skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_supported_band *sband =
-               local->hw.wiphy->bands[info->band];
        int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
                                                   NULL);
        u16 txflags;
@@ -2036,17 +2056,8 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
        u8 vht_mcs = 0, vht_nss = 0;
        int i;
 
-       /* check for not even having the fixed radiotap header part */
-       if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
-               return false; /* too short to be possibly valid */
-
-       /* is it a header version we can trust to find length from? */
-       if (unlikely(rthdr->it_version))
-               return false; /* only version 0 is supported */
-
-       /* does the skb contain enough to deliver on the alleged length? */
-       if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
-               return false; /* skb too short for claimed rt header extent */
+       if (!ieee80211_validate_radiotap_len(skb))
+               return false;
 
        info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
                       IEEE80211_TX_CTL_DONTFRAG;
@@ -2186,6 +2197,9 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
                return false;
 
        if (rate_found) {
+               struct ieee80211_supported_band *sband =
+                       local->hw.wiphy->bands[info->band];
+
                info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT;
 
                for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
@@ -2199,7 +2213,7 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
                } else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) {
                        ieee80211_rate_set_vht(info->control.rates, vht_mcs,
                                               vht_nss);
-               } else {
+               } else if (sband) {
                        for (i = 0; i < sband->n_bitrates; i++) {
                                if (rate * 5 != sband->bitrates[i].bitrate)
                                        continue;
@@ -2236,8 +2250,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
        info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
                      IEEE80211_TX_CTL_INJECTED;
 
-       /* Sanity-check and process the injection radiotap header */
-       if (!ieee80211_parse_tx_radiotap(skb, dev))
+       /* Sanity-check the length of the radiotap header */
+       if (!ieee80211_validate_radiotap_len(skb))
                goto fail;
 
        /* we now know there is a radiotap header with a length we can use */
@@ -2351,6 +2365,14 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
        ieee80211_select_queue_80211(sdata, skb, hdr);
        skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority));
 
+       /*
+        * Process the radiotap header. This will now take into account the
+        * selected chandef above to accurately set injection rates and
+        * retransmissions.
+        */
+       if (!ieee80211_parse_tx_radiotap(skb, dev))
+               goto fail_rcu;
+
        /* remove the injection radiotap header */
        skb_pull(skb, len_rthdr);
 
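The length sanity checks are split out into ieee80211_validate_radiotap_len() so the monitor xmit path above can validate early but defer full parsing until the channel definition has been selected, at which point injected rates can be interpreted correctly. The radiotap fixed header is a version byte (must be 0), a pad byte, then a little-endian 16-bit total header length; a standalone validator following the same three checks looks like:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct radiotap_header {      /* fixed part of every radiotap header */
        uint8_t  it_version;      /* must be 0 */
        uint8_t  it_pad;
        uint16_t it_len;          /* little-endian total header length */
        uint32_t it_present;
    } __attribute__((packed));

    static bool radiotap_len_ok(const uint8_t *buf, size_t buflen)
    {
        struct radiotap_header h;

        if (buflen < sizeof(h))
            return false;          /* too short for the fixed part */
        memcpy(&h, buf, sizeof(h));
        if (h.it_version != 0)
            return false;          /* only version 0 is defined */
        /* assumes a little-endian host; the kernel uses le16_to_cpu() */
        return buflen >= h.it_len; /* buffer must cover the claimed extent */
    }

    int main(void)
    {
        uint8_t ok[8]  = { 0, 0, 8, 0, 0, 0, 0, 0 };   /* it_len = 8 */
        uint8_t bad[8] = { 0, 0, 64, 0, 0, 0, 0, 0 };  /* claims 64 bytes */

        return radiotap_len_ok(ok, sizeof(ok)) &&
               !radiotap_len_ok(bad, sizeof(bad)) ? 0 : 1;
    }
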
@@ -3264,6 +3286,9 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
                return false;
 
+       if (sdata->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
+               return false;
+
        if (skb_is_gso(skb))
                return false;
 
@@ -3369,15 +3394,21 @@ out:
  * Can be called while the sta lock is held. Anything that can cause packets to
  * be generated will cause deadlock!
  */
-static void ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
-                                      struct sta_info *sta, u8 pn_offs,
-                                      struct ieee80211_key *key,
-                                      struct sk_buff *skb)
+static ieee80211_tx_result
+ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
+                          struct sta_info *sta, u8 pn_offs,
+                          struct ieee80211_key *key,
+                          struct ieee80211_tx_data *tx)
 {
+       struct sk_buff *skb = tx->skb;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (void *)skb->data;
        u8 tid = IEEE80211_NUM_TIDS;
 
+       if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL) &&
+           ieee80211_tx_h_rate_ctrl(tx) != TX_CONTINUE)
+               return TX_DROP;
+
        if (key)
                info->control.hw_key = &key->conf;
 
@@ -3426,6 +3457,8 @@ static void ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
                        break;
                }
        }
+
+       return TX_CONTINUE;
 }
 
 static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
@@ -3529,24 +3562,17 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
        tx.sta = sta;
        tx.key = fast_tx->key;
 
-       if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
-               tx.skb = skb;
-               r = ieee80211_tx_h_rate_ctrl(&tx);
-               skb = tx.skb;
-               tx.skb = NULL;
-
-               if (r != TX_CONTINUE) {
-                       if (r != TX_QUEUED)
-                               kfree_skb(skb);
-                       return true;
-               }
-       }
-
        if (ieee80211_queue_skb(local, sdata, sta, skb))
                return true;
 
-       ieee80211_xmit_fast_finish(sdata, sta, fast_tx->pn_offs,
-                                  fast_tx->key, skb);
+       tx.skb = skb;
+       r = ieee80211_xmit_fast_finish(sdata, sta, fast_tx->pn_offs,
+                                      fast_tx->key, &tx);
+       tx.skb = NULL;
+       if (r == TX_DROP) {
+               kfree_skb(skb);
+               return true;
+       }
 
        if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
                sdata = container_of(sdata->bss,
@@ -3651,8 +3677,16 @@ begin:
        else
                info->flags &= ~IEEE80211_TX_CTL_AMPDU;
 
-       if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+       if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
+               if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
+                       r = ieee80211_tx_h_rate_ctrl(&tx);
+                       if (r != TX_CONTINUE) {
+                               ieee80211_free_txskb(&local->hw, skb);
+                               goto begin;
+                       }
+               }
                goto encap_out;
+       }
 
        if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
                struct sta_info *sta = container_of(txq->sta, struct sta_info,
@@ -3663,8 +3697,12 @@ begin:
                    (tx.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV))
                        pn_offs = ieee80211_hdrlen(hdr->frame_control);
 
-               ieee80211_xmit_fast_finish(sta->sdata, sta, pn_offs,
-                                          tx.key, skb);
+               r = ieee80211_xmit_fast_finish(sta->sdata, sta, pn_offs,
+                                              tx.key, &tx);
+               if (r != TX_CONTINUE) {
+                       ieee80211_free_txskb(&local->hw, skb);
+                       goto begin;
+               }
        } else {
                if (invoke_tx_handlers_late(&tx))
                        goto begin;
@@ -3744,102 +3782,259 @@ EXPORT_SYMBOL(ieee80211_tx_dequeue);
 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
 {
        struct ieee80211_local *local = hw_to_local(hw);
+       struct airtime_sched_info *air_sched;
+       u64 now = ktime_get_boottime_ns();
        struct ieee80211_txq *ret = NULL;
-       struct txq_info *txqi = NULL, *head = NULL;
-       bool found_eligible_txq = false;
+       struct airtime_info *air_info;
+       struct txq_info *txqi = NULL;
+       struct rb_node *node;
+       bool first = false;
 
-       spin_lock_bh(&local->active_txq_lock[ac]);
+       air_sched = &local->airtime[ac];
+       spin_lock_bh(&air_sched->lock);
 
- begin:
-       txqi = list_first_entry_or_null(&local->active_txqs[ac],
-                                       struct txq_info,
-                                       schedule_order);
-       if (!txqi)
+       node = air_sched->schedule_pos;
+
+begin:
+       if (!node) {
+               node = rb_first_cached(&air_sched->active_txqs);
+               first = true;
+       } else {
+               node = rb_next(node);
+       }
+
+       if (!node)
                goto out;
 
-       if (txqi == head) {
-               if (!found_eligible_txq)
-                       goto out;
-               else
-                       found_eligible_txq = false;
+       txqi = container_of(node, struct txq_info, schedule_order);
+       air_info = to_airtime_info(&txqi->txq);
+
+       if (air_info->v_t > air_sched->v_t &&
+           (!first || !airtime_catchup_v_t(air_sched, air_info->v_t, now)))
+               goto out;
+
+       if (!ieee80211_txq_airtime_check(hw, &txqi->txq)) {
+               first = false;
+               goto begin;
        }
 
-       if (!head)
-               head = txqi;
+       air_sched->schedule_pos = node;
+       air_sched->last_schedule_activity = now;
+       ret = &txqi->txq;
+out:
+       spin_unlock_bh(&air_sched->lock);
+       return ret;
+}
+EXPORT_SYMBOL(ieee80211_next_txq);
 
-       if (txqi->txq.sta) {
-               struct sta_info *sta = container_of(txqi->txq.sta,
-                                                   struct sta_info, sta);
-               bool aql_check = ieee80211_txq_airtime_check(hw, &txqi->txq);
-               s64 deficit = sta->airtime[txqi->txq.ac].deficit;
+static void __ieee80211_insert_txq(struct rb_root_cached *root,
+                                  struct txq_info *txqi)
+{
+       struct rb_node **new = &root->rb_root.rb_node;
+       struct airtime_info *old_air, *new_air;
+       struct rb_node *parent = NULL;
+       struct txq_info *__txqi;
+       bool leftmost = true;
+
+       while (*new) {
+               parent = *new;
+               __txqi = rb_entry(parent, struct txq_info, schedule_order);
+               old_air = to_airtime_info(&__txqi->txq);
+               new_air = to_airtime_info(&txqi->txq);
+
+               if (new_air->v_t <= old_air->v_t) {
+                       new = &parent->rb_left;
+               } else {
+                       new = &parent->rb_right;
+                       leftmost = false;
+               }
+       }
 
-               if (aql_check)
-                       found_eligible_txq = true;
+       rb_link_node(&txqi->schedule_order, parent, new);
+       rb_insert_color_cached(&txqi->schedule_order, root, leftmost);
+}
 
-               if (deficit < 0)
-                       sta->airtime[txqi->txq.ac].deficit +=
-                               sta->airtime_weight;
+void ieee80211_resort_txq(struct ieee80211_hw *hw,
+                         struct ieee80211_txq *txq)
+{
+       struct airtime_info *air_info = to_airtime_info(txq);
+       struct ieee80211_local *local = hw_to_local(hw);
+       struct txq_info *txqi = to_txq_info(txq);
+       struct airtime_sched_info *air_sched;
 
-               if (deficit < 0 || !aql_check) {
-                       list_move_tail(&txqi->schedule_order,
-                                      &local->active_txqs[txqi->txq.ac]);
-                       goto begin;
+       air_sched = &local->airtime[txq->ac];
+
+       lockdep_assert_held(&air_sched->lock);
+
+       if (!RB_EMPTY_NODE(&txqi->schedule_order)) {
+               struct airtime_info *a_prev = NULL, *a_next = NULL;
+               struct txq_info *t_prev, *t_next;
+               struct rb_node *n_prev, *n_next;
+
+               /* Erasing a node can cause an expensive rebalancing operation,
+                * so we check the previous and next nodes first and only remove
+                * and re-insert if the current node is not already in the
+                * correct position.
+                */
+               if ((n_prev = rb_prev(&txqi->schedule_order)) != NULL) {
+                       t_prev = container_of(n_prev, struct txq_info,
+                                             schedule_order);
+                       a_prev = to_airtime_info(&t_prev->txq);
+               }
+
+               if ((n_next = rb_next(&txqi->schedule_order)) != NULL) {
+                       t_next = container_of(n_next, struct txq_info,
+                                             schedule_order);
+                       a_next = to_airtime_info(&t_next->txq);
                }
+
+               if ((!a_prev || a_prev->v_t <= air_info->v_t) &&
+                   (!a_next || a_next->v_t > air_info->v_t))
+                       return;
+
+               if (air_sched->schedule_pos == &txqi->schedule_order)
+                       air_sched->schedule_pos = n_prev;
+
+               rb_erase_cached(&txqi->schedule_order,
+                               &air_sched->active_txqs);
+               RB_CLEAR_NODE(&txqi->schedule_order);
+               __ieee80211_insert_txq(&air_sched->active_txqs, txqi);
        }
+}
+
+void ieee80211_update_airtime_weight(struct ieee80211_local *local,
+                                    struct airtime_sched_info *air_sched,
+                                    u64 now, bool force)
+{
+       struct airtime_info *air_info, *tmp;
+       u64 weight_sum = 0;
+
+       if (unlikely(!now))
+               now = ktime_get_boottime_ns();
 
+       lockdep_assert_held(&air_sched->lock);
+
+       if (!force && (air_sched->last_weight_update <
+                      now - AIRTIME_ACTIVE_DURATION))
+               return;
+
+       list_for_each_entry_safe(air_info, tmp,
+                                &air_sched->active_list, list) {
+               if (airtime_is_active(air_info, now))
+                       weight_sum += air_info->weight;
+               else
+                       list_del_init(&air_info->list);
+       }
+       airtime_weight_sum_set(air_sched, weight_sum);
+       air_sched->last_weight_update = now;
+}
 
-       if (txqi->schedule_round == local->schedule_round[ac])
+void ieee80211_schedule_txq(struct ieee80211_hw *hw,
+                           struct ieee80211_txq *txq)
+       __acquires(txq_lock) __releases(txq_lock)
+{
+       struct ieee80211_local *local = hw_to_local(hw);
+       struct txq_info *txqi = to_txq_info(txq);
+       struct airtime_sched_info *air_sched;
+       u64 now = ktime_get_boottime_ns();
+       struct airtime_info *air_info;
+       u8 ac = txq->ac;
+       bool was_active;
+
+       air_sched = &local->airtime[ac];
+       air_info = to_airtime_info(txq);
+
+       spin_lock_bh(&air_sched->lock);
+       was_active = airtime_is_active(air_info, now);
+       airtime_set_active(air_sched, air_info, now);
+
+       if (!RB_EMPTY_NODE(&txqi->schedule_order))
                goto out;
 
-       list_del_init(&txqi->schedule_order);
-       txqi->schedule_round = local->schedule_round[ac];
-       ret = &txqi->txq;
+       /* If the station has been inactive for a while, catch up its v_t so it
+        * doesn't get indefinite priority; see comment above the definition of
+        * AIRTIME_MAX_BEHIND.
+        */
+       if ((!was_active && air_info->v_t < air_sched->v_t) ||
+           air_info->v_t < air_sched->v_t - AIRTIME_MAX_BEHIND)
+               air_info->v_t = air_sched->v_t;
+
+       ieee80211_update_airtime_weight(local, air_sched, now, !was_active);
+       __ieee80211_insert_txq(&air_sched->active_txqs, txqi);
 
 out:
-       spin_unlock_bh(&local->active_txq_lock[ac]);
-       return ret;
+       spin_unlock_bh(&air_sched->lock);
 }
-EXPORT_SYMBOL(ieee80211_next_txq);
+EXPORT_SYMBOL(ieee80211_schedule_txq);
 
-void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
-                             struct ieee80211_txq *txq,
-                             bool force)
+static void __ieee80211_unschedule_txq(struct ieee80211_hw *hw,
+                                      struct ieee80211_txq *txq,
+                                      bool purge)
 {
        struct ieee80211_local *local = hw_to_local(hw);
        struct txq_info *txqi = to_txq_info(txq);
+       struct airtime_sched_info *air_sched;
+       struct airtime_info *air_info;
 
-       spin_lock_bh(&local->active_txq_lock[txq->ac]);
-
-       if (list_empty(&txqi->schedule_order) &&
-           (force || !skb_queue_empty(&txqi->frags) ||
-            txqi->tin.backlog_packets)) {
-               /* If airtime accounting is active, always enqueue STAs at the
-                * head of the list to ensure that they only get moved to the
-                * back by the airtime DRR scheduler once they have a negative
-                * deficit. A station that already has a negative deficit will
-                * get immediately moved to the back of the list on the next
-                * call to ieee80211_next_txq().
-                */
-               if (txqi->txq.sta && local->airtime_flags &&
-                   wiphy_ext_feature_isset(local->hw.wiphy,
-                                           NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
-                       list_add(&txqi->schedule_order,
-                                &local->active_txqs[txq->ac]);
-               else
-                       list_add_tail(&txqi->schedule_order,
-                                     &local->active_txqs[txq->ac]);
+       air_sched = &local->airtime[txq->ac];
+       air_info = to_airtime_info(&txqi->txq);
+
+       lockdep_assert_held(&air_sched->lock);
+
+       if (purge) {
+               list_del_init(&air_info->list);
+               ieee80211_update_airtime_weight(local, air_sched, 0, true);
        }
 
-       spin_unlock_bh(&local->active_txq_lock[txq->ac]);
+       if (RB_EMPTY_NODE(&txqi->schedule_order))
+               return;
+
+       if (air_sched->schedule_pos == &txqi->schedule_order)
+               air_sched->schedule_pos = rb_prev(&txqi->schedule_order);
+
+       if (!purge)
+               airtime_set_active(air_sched, air_info,
+                                  ktime_get_boottime_ns());
+
+       rb_erase_cached(&txqi->schedule_order,
+                       &air_sched->active_txqs);
+       RB_CLEAR_NODE(&txqi->schedule_order);
 }
-EXPORT_SYMBOL(__ieee80211_schedule_txq);
+
+void ieee80211_unschedule_txq(struct ieee80211_hw *hw,
+                             struct ieee80211_txq *txq,
+                             bool purge)
+       __acquires(txq_lock) __releases(txq_lock)
+{
+       struct ieee80211_local *local = hw_to_local(hw);
+
+       spin_lock_bh(&local->airtime[txq->ac].lock);
+       __ieee80211_unschedule_txq(hw, txq, purge);
+       spin_unlock_bh(&local->airtime[txq->ac].lock);
+}
+
+void ieee80211_return_txq(struct ieee80211_hw *hw,
+                         struct ieee80211_txq *txq, bool force)
+{
+       struct ieee80211_local *local = hw_to_local(hw);
+       struct txq_info *txqi = to_txq_info(txq);
+
+       spin_lock_bh(&local->airtime[txq->ac].lock);
+
+       if (!RB_EMPTY_NODE(&txqi->schedule_order) && !force &&
+           !txq_has_queue(txq))
+               __ieee80211_unschedule_txq(hw, txq, false);
+
+       spin_unlock_bh(&local->airtime[txq->ac].lock);
+}
+EXPORT_SYMBOL(ieee80211_return_txq);
 
 DEFINE_STATIC_KEY_FALSE(aql_disable);
 
 bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw,
                                 struct ieee80211_txq *txq)
 {
-       struct sta_info *sta;
+       struct airtime_info *air_info = to_airtime_info(txq);
        struct ieee80211_local *local = hw_to_local(hw);
 
        if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
@@ -3854,15 +4049,12 @@ bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw,
        if (unlikely(txq->tid == IEEE80211_NUM_TIDS))
                return true;
 
-       sta = container_of(txq->sta, struct sta_info, sta);
-       if (atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
-           sta->airtime[txq->ac].aql_limit_low)
+       if (atomic_read(&air_info->aql_tx_pending) < air_info->aql_limit_low)
                return true;
 
        if (atomic_read(&local->aql_total_pending_airtime) <
            local->aql_threshold &&
-           atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
-           sta->airtime[txq->ac].aql_limit_high)
+           atomic_read(&air_info->aql_tx_pending) < air_info->aql_limit_high)
                return true;
 
        return false;
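With the AQL limits moved from sta_info into airtime_info, the check above no longer needs a station: any TXQ, including the per-interface multicast queue, carries its own pending-airtime budget. The two-tier admission logic (always admit below the low limit; admit up to the high limit while the device-wide total is under threshold) in a sketch:

    #include <stdbool.h>
    #include <stdio.h>

    struct aql {
        unsigned int tx_pending;  /* airtime (usec) queued to the device */
        unsigned int limit_low;   /* always admit below this */
        unsigned int limit_high;  /* admit up to this while device isn't congested */
    };

    static bool aql_may_tx(const struct aql *q, unsigned int total_pending,
                           unsigned int total_threshold)
    {
        if (q->tx_pending < q->limit_low)
            return true;
        if (total_pending < total_threshold && q->tx_pending < q->limit_high)
            return true;
        return false;
    }

    int main(void)
    {
        struct aql q = { .tx_pending = 6000, .limit_low = 5000, .limit_high = 12000 };

        printf("%d\n", aql_may_tx(&q, 10000, 24000)); /* 1: over low, device idle */
        printf("%d\n", aql_may_tx(&q, 30000, 24000)); /* 0: device congested */
        return 0;
    }
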
@@ -3872,63 +4064,85 @@ EXPORT_SYMBOL(ieee80211_txq_airtime_check);
 bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
                                struct ieee80211_txq *txq)
 {
+       struct txq_info *first_txqi = NULL, *txqi = to_txq_info(txq);
        struct ieee80211_local *local = hw_to_local(hw);
-       struct txq_info *iter, *tmp, *txqi = to_txq_info(txq);
-       struct sta_info *sta;
-       u8 ac = txq->ac;
+       struct airtime_sched_info *air_sched;
+       struct airtime_info *air_info;
+       struct rb_node *node = NULL;
+       bool ret = false;
+       u64 now;
 
-       spin_lock_bh(&local->active_txq_lock[ac]);
 
-       if (!txqi->txq.sta)
-               goto out;
+       if (!ieee80211_txq_airtime_check(hw, txq))
+               return false;
+
+       air_sched = &local->airtime[txq->ac];
+       spin_lock_bh(&air_sched->lock);
 
-       if (list_empty(&txqi->schedule_order))
+       if (RB_EMPTY_NODE(&txqi->schedule_order))
                goto out;
 
-       list_for_each_entry_safe(iter, tmp, &local->active_txqs[ac],
-                                schedule_order) {
-               if (iter == txqi)
-                       break;
+       now = ktime_get_boottime_ns();
 
-               if (!iter->txq.sta) {
-                       list_move_tail(&iter->schedule_order,
-                                      &local->active_txqs[ac]);
-                       continue;
-               }
-               sta = container_of(iter->txq.sta, struct sta_info, sta);
-               if (sta->airtime[ac].deficit < 0)
-                       sta->airtime[ac].deficit += sta->airtime_weight;
-               list_move_tail(&iter->schedule_order, &local->active_txqs[ac]);
-       }
+       /* Like in ieee80211_next_txq(), make sure the first station in the
+        * scheduling order is eligible for transmission to avoid starvation.
+        */
+       node = rb_first_cached(&air_sched->active_txqs);
+       if (node) {
+               first_txqi = container_of(node, struct txq_info,
+                                         schedule_order);
+               air_info = to_airtime_info(&first_txqi->txq);
 
-       sta = container_of(txqi->txq.sta, struct sta_info, sta);
-       if (sta->airtime[ac].deficit >= 0)
-               goto out;
+               if (air_sched->v_t < air_info->v_t)
+                       airtime_catchup_v_t(air_sched, air_info->v_t, now);
+       }
 
-       sta->airtime[ac].deficit += sta->airtime_weight;
-       list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
-       spin_unlock_bh(&local->active_txq_lock[ac]);
+       air_info = to_airtime_info(&txqi->txq);
+       if (air_info->v_t <= air_sched->v_t) {
+               air_sched->last_schedule_activity = now;
+               ret = true;
+       }
 
-       return false;
 out:
-       if (!list_empty(&txqi->schedule_order))
-               list_del_init(&txqi->schedule_order);
-       spin_unlock_bh(&local->active_txq_lock[ac]);
-
-       return true;
+       spin_unlock_bh(&air_sched->lock);
+       return ret;
 }
 EXPORT_SYMBOL(ieee80211_txq_may_transmit);
 
 void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
 {
        struct ieee80211_local *local = hw_to_local(hw);
+       struct airtime_sched_info *air_sched = &local->airtime[ac];
 
-       spin_lock_bh(&local->active_txq_lock[ac]);
-       local->schedule_round[ac]++;
-       spin_unlock_bh(&local->active_txq_lock[ac]);
+       spin_lock_bh(&air_sched->lock);
+       air_sched->schedule_pos = NULL;
+       spin_unlock_bh(&air_sched->lock);
 }
 EXPORT_SYMBOL(ieee80211_txq_schedule_start);
 
+static void
+ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata,
+                    struct sta_info *sta,
+                    struct sk_buff *skb)
+{
+       struct rate_control_ref *ref = sdata->local->rate_ctrl;
+       u16 tid;
+
+       if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER))
+               return;
+
+       if (!sta || !sta->sta.ht_cap.ht_supported ||
+           !sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO ||
+           skb->protocol == sdata->control_port_protocol)
+               return;
+
+       tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+       if (likely(sta->ampdu_mlme.tid_tx[tid]))
+               return;
+
+       ieee80211_start_tx_ba_session(&sta->sta, tid, 0);
+}
+
 void __ieee80211_subif_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev,
                                  u32 info_flags,
@@ -3959,6 +4173,8 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
                skb_get_hash(skb);
        }
 
+       ieee80211_aggr_check(sdata, sta, skb);
+
        if (sta) {
                struct ieee80211_fast_tx *fast_tx;
 
@@ -4222,6 +4438,8 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
 
        memset(info, 0, sizeof(*info));
 
+       ieee80211_aggr_check(sdata, sta, skb);
+
        tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
        tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
        if (tid_tx) {
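Taken together, the tx.c changes replace the round-robin deficit scheduler with a virtual-time one: active TXQs sit in a per-AC rbtree sorted by v_t, ieee80211_next_txq() walks from the leftmost (smallest v_t) node, and a TXQ that was inactive has its v_t caught up so it cannot bank unbounded priority. A compact userspace model of the selection rule, using a linear scan in place of the rbtree:

    #include <stdint.h>
    #include <stdio.h>

    struct txq {
        const char *name;
        uint64_t v_t;   /* virtual time consumed: airtime / weight */
        int eligible;   /* stand-in for the AQL check */
    };

    /* pick the eligible queue with the smallest virtual time
     * (the leftmost node of the cached rbtree in the real code) */
    static struct txq *next_txq(struct txq *qs, int n, uint64_t *sched_v_t)
    {
        struct txq *best = NULL;
        for (int i = 0; i < n; i++) {
            if (!qs[i].eligible)
                continue;
            if (!best || qs[i].v_t < best->v_t)
                best = &qs[i];
        }
        if (best && best->v_t > *sched_v_t)
            *sched_v_t = best->v_t;  /* catch up the global clock */
        return best;
    }

    int main(void)
    {
        struct txq qs[] = {
            { "sta1", 1000, 1 },
            { "sta2",  400, 1 },  /* least airtime used: goes first */
            { "mcast", 200, 0 },  /* throttled by AQL: skipped */
        };
        uint64_t v_t = 0;
        struct txq *q = next_txq(qs, 3, &v_t);

        printf("serve %s (scheduler v_t now %llu)\n", q->name,
               (unsigned long long)v_t);
        return 0;
    }
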
index 0a0481f..05e9621 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright 2007      Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright (C) 2015-2017     Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  *
  * utilities for mac80211
  */
@@ -947,7 +947,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
 
        switch (elem->data[0]) {
        case WLAN_EID_EXT_HE_MU_EDCA:
-               if (len == sizeof(*elems->mu_edca_param_set)) {
+               if (len >= sizeof(*elems->mu_edca_param_set)) {
                        elems->mu_edca_param_set = data;
                        if (crc)
                                *crc = crc32_be(*crc, (void *)elem,
@@ -968,7 +968,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
                }
                break;
        case WLAN_EID_EXT_UORA:
-               if (len == 1)
+               if (len >= 1)
                        elems->uora_element = data;
                break;
        case WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME:
@@ -976,7 +976,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
                        elems->max_channel_switch_time = data;
                break;
        case WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION:
-               if (len == sizeof(*elems->mbssid_config_ie))
+               if (len >= sizeof(*elems->mbssid_config_ie))
                        elems->mbssid_config_ie = data;
                break;
        case WLAN_EID_EXT_HE_SPR:
@@ -985,7 +985,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
                        elems->he_spr = data;
                break;
        case WLAN_EID_EXT_HE_6GHZ_CAPA:
-               if (len == sizeof(*elems->he_6ghz_capa))
+               if (len >= sizeof(*elems->he_6ghz_capa))
                        elems->he_6ghz_capa = data;
                break;
        }
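The extension-element checks above, and the plain-element checks below, are relaxed from exact equality to a minimum length: newer peers may legitimately append fields to an element, and a parser that takes a pointer to a fixed-size struct only needs the element to be at least that long. The pattern in isolation, with a hypothetical element body:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct mu_edca_like {  /* hypothetical fixed prefix of an element body */
        uint8_t qos_info;
        uint8_t param_record[3];
    };

    static const struct mu_edca_like *parse_elem(const uint8_t *data, size_t len)
    {
        /* ">=" instead of "==": tolerate trailing fields from newer specs */
        if (len < sizeof(struct mu_edca_like))
            return NULL;
        return (const void *)data;
    }

    int main(void)
    {
        uint8_t short_elem[3] = {0};
        uint8_t long_elem[6] = {0};  /* 2 extra bytes appended by a newer peer */

        printf("short: %s\n", parse_elem(short_elem, sizeof(short_elem)) ? "ok" : "rejected");
        printf("long:  %s\n", parse_elem(long_elem, sizeof(long_elem)) ? "ok" : "rejected");
        return 0;
    }
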
@@ -1074,14 +1074,14 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
 
                switch (id) {
                case WLAN_EID_LINK_ID:
-                       if (elen + 2 != sizeof(struct ieee80211_tdls_lnkie)) {
+                       if (elen + 2 < sizeof(struct ieee80211_tdls_lnkie)) {
                                elem_parse_failed = true;
                                break;
                        }
                        elems->lnk_id = (void *)(pos - 2);
                        break;
                case WLAN_EID_CHAN_SWITCH_TIMING:
-                       if (elen != sizeof(struct ieee80211_ch_switch_timing)) {
+                       if (elen < sizeof(struct ieee80211_ch_switch_timing)) {
                                elem_parse_failed = true;
                                break;
                        }
@@ -1244,7 +1244,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
                        elems->sec_chan_offs = (void *)pos;
                        break;
                case WLAN_EID_CHAN_SWITCH_PARAM:
-                       if (elen !=
+                       if (elen <
                            sizeof(*elems->mesh_chansw_params_ie)) {
                                elem_parse_failed = true;
                                break;
@@ -1253,7 +1253,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
                        break;
                case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
                        if (!action ||
-                           elen != sizeof(*elems->wide_bw_chansw_ie)) {
+                           elen < sizeof(*elems->wide_bw_chansw_ie)) {
                                elem_parse_failed = true;
                                break;
                        }
@@ -1272,7 +1272,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
                        ie = cfg80211_find_ie(WLAN_EID_WIDE_BW_CHANNEL_SWITCH,
                                              pos, elen);
                        if (ie) {
-                               if (ie[1] == sizeof(*elems->wide_bw_chansw_ie))
+                               if (ie[1] >= sizeof(*elems->wide_bw_chansw_ie))
                                        elems->wide_bw_chansw_ie =
                                                (void *)(ie + 2);
                                else
@@ -1316,7 +1316,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
                        elems->cisco_dtpc_elem = pos;
                        break;
                case WLAN_EID_ADDBA_EXT:
-                       if (elen != sizeof(struct ieee80211_addba_ext_ie)) {
+                       if (elen < sizeof(struct ieee80211_addba_ext_ie)) {
                                elem_parse_failed = true;
                                break;
                        }
@@ -1342,7 +1342,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
                                                          elem, elems);
                        break;
                case WLAN_EID_S1G_CAPABILITIES:
-                       if (elen == sizeof(*elems->s1g_capab))
+                       if (elen >= sizeof(*elems->s1g_capab))
                                elems->s1g_capab = (void *)pos;
                        else
                                elem_parse_failed = true;
@@ -1693,7 +1693,10 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
        if (auth_alg == WLAN_AUTH_SHARED_KEY && transaction == 3) {
                mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
                err = ieee80211_wep_encrypt(local, skb, key, key_len, key_idx);
-               WARN_ON(err);
+               if (WARN_ON(err)) {
+                       kfree_skb(skb);
+                       return;
+               }
        }
 
        IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
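
Previously a failed WEP encryption only triggered a warning and the frame was still passed on; now the skb is freed and transmission aborted. The fix leans on WARN_ON() returning the truth value of its condition, which lets it gate the error path directly; a generic sketch (do_encrypt() is hypothetical):

err = do_encrypt(skb);
if (WARN_ON(err)) {
	kfree_skb(skb);		/* free the frame instead of leaking it */
	return;
}
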
@@ -1934,13 +1937,26 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
                *offset = noffset;
        }
 
-       he_cap = ieee80211_get_he_sta_cap(sband);
-       if (he_cap) {
+       he_cap = ieee80211_get_he_iftype_cap(sband,
+                                            ieee80211_vif_type_p2p(&sdata->vif));
+       if (he_cap &&
+           cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
+                                        IEEE80211_CHAN_NO_HE)) {
                pos = ieee80211_ie_build_he_cap(pos, he_cap, end);
                if (!pos)
                        goto out_err;
+       }
+
+       if (cfg80211_any_usable_channels(local->hw.wiphy,
+                                        BIT(NL80211_BAND_6GHZ),
+                                        IEEE80211_CHAN_NO_HE)) {
+               struct ieee80211_supported_band *sband6;
+
+               sband6 = local->hw.wiphy->bands[NL80211_BAND_6GHZ];
+               he_cap = ieee80211_get_he_iftype_cap(sband6,
+                               ieee80211_vif_type_p2p(&sdata->vif));
 
-               if (sband->band == NL80211_BAND_6GHZ) {
+               if (he_cap) {
                        enum nl80211_iftype iftype =
                                ieee80211_vif_type_p2p(&sdata->vif);
                        __le16 cap = ieee80211_get_he_6ghz_capa(sband, iftype);
@@ -2178,8 +2194,6 @@ static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
        list_for_each_entry(ctx, &local->chanctx_list, list)
                ctx->driver_present = false;
        mutex_unlock(&local->chanctx_mtx);
-
-       cfg80211_shutdown_all_interfaces(local->hw.wiphy);
 }
 
 static void ieee80211_assign_chanctx(struct ieee80211_local *local,
@@ -2946,12 +2960,15 @@ void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata,
        u8 *pos;
        u16 cap;
 
-       sband = ieee80211_get_sband(sdata);
-       if (!sband)
+       if (!cfg80211_any_usable_channels(sdata->local->hw.wiphy,
+                                         BIT(NL80211_BAND_6GHZ),
+                                         IEEE80211_CHAN_NO_HE))
                return;
 
+       sband = sdata->local->hw.wiphy->bands[NL80211_BAND_6GHZ];
+
        iftd = ieee80211_get_sband_iftype_data(sband, iftype);
-       if (WARN_ON(!iftd))
+       if (!iftd)
                return;
 
        /* Check for device HE 6 GHz capability before adding element */
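
Both hunks above gate HE element construction on cfg80211_any_usable_channels(), which reports whether any channel in the given bands can be used without the listed prohibited flags. A sketch of the guard as used here:

/* skip the element when every 6 GHz channel forbids HE operation */
if (!cfg80211_any_usable_channels(sdata->local->hw.wiphy,
				  BIT(NL80211_BAND_6GHZ),
				  IEEE80211_CHAN_NO_HE))
	return;
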
index 1ec4d36..7d738bd 100644 (file)
@@ -23,6 +23,8 @@ struct mptcp_pernet {
 
        u8 mptcp_enabled;
        unsigned int add_addr_timeout;
+       u8 checksum_enabled;
+       u8 allow_join_initial_addr_port;
 };
 
 static struct mptcp_pernet *mptcp_get_pernet(struct net *net)
@@ -40,10 +42,22 @@ unsigned int mptcp_get_add_addr_timeout(struct net *net)
        return mptcp_get_pernet(net)->add_addr_timeout;
 }
 
+int mptcp_is_checksum_enabled(struct net *net)
+{
+       return mptcp_get_pernet(net)->checksum_enabled;
+}
+
+int mptcp_allow_join_id0(struct net *net)
+{
+       return mptcp_get_pernet(net)->allow_join_initial_addr_port;
+}
+
 static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
 {
        pernet->mptcp_enabled = 1;
        pernet->add_addr_timeout = TCP_RTO_MAX;
+       pernet->checksum_enabled = 0;
+       pernet->allow_join_initial_addr_port = 1;
 }
 
 #ifdef CONFIG_SYSCTL
@@ -65,6 +79,22 @@ static struct ctl_table mptcp_sysctl_table[] = {
                .mode = 0644,
                .proc_handler = proc_dointvec_jiffies,
        },
+       {
+               .procname = "checksum_enabled",
+               .maxlen = sizeof(u8),
+               .mode = 0644,
+               .proc_handler = proc_dou8vec_minmax,
+               .extra1       = SYSCTL_ZERO,
+               .extra2       = SYSCTL_ONE
+       },
+       {
+               .procname = "allow_join_initial_addr_port",
+               .maxlen = sizeof(u8),
+               .mode = 0644,
+               .proc_handler = proc_dou8vec_minmax,
+               .extra1       = SYSCTL_ZERO,
+               .extra2       = SYSCTL_ONE
+       },
        {}
 };
 
@@ -82,6 +112,8 @@ static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
 
        table[0].data = &pernet->mptcp_enabled;
        table[1].data = &pernet->add_addr_timeout;
+       table[2].data = &pernet->checksum_enabled;
+       table[3].data = &pernet->allow_join_initial_addr_port;
 
        hdr = register_net_sysctl(net, MPTCP_SYSCTL_PATH, table);
        if (!hdr)
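
With the table above registered, the two new knobs appear as net.mptcp.checksum_enabled and net.mptcp.allow_join_initial_addr_port (proc_dou8vec_minmax clamps both to 0/1) and are read back per network namespace through the accessors added earlier; an illustrative consumer:

/* sketch: request DSS checksums on new connections when enabled */
if (mptcp_is_checksum_enabled(sock_net(sk)))
	opts->csum_reqd = 1;
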
index eb2dc6d..52ea251 100644 (file)
@@ -25,6 +25,8 @@ static const struct snmp_mib mptcp_snmp_list[] = {
        SNMP_MIB_ITEM("MPJoinAckHMacFailure", MPTCP_MIB_JOINACKMAC),
        SNMP_MIB_ITEM("DSSNotMatching", MPTCP_MIB_DSSNOMATCH),
        SNMP_MIB_ITEM("InfiniteMapRx", MPTCP_MIB_INFINITEMAPRX),
+       SNMP_MIB_ITEM("DSSNoMatchTCP", MPTCP_MIB_DSSTCPMISMATCH),
+       SNMP_MIB_ITEM("DataCsumErr", MPTCP_MIB_DATACSUMERR),
        SNMP_MIB_ITEM("OFOQueueTail", MPTCP_MIB_OFOQUEUETAIL),
        SNMP_MIB_ITEM("OFOQueue", MPTCP_MIB_OFOQUEUE),
        SNMP_MIB_ITEM("OFOMerge", MPTCP_MIB_OFOMERGE),
index f0da4f0..193466c 100644 (file)
@@ -18,6 +18,8 @@ enum linux_mptcp_mib_field {
        MPTCP_MIB_JOINACKMAC,           /* HMAC was wrong on ACK + MP_JOIN */
        MPTCP_MIB_DSSNOMATCH,           /* Received a new mapping that did not match the previous one */
        MPTCP_MIB_INFINITEMAPRX,        /* Received an infinite mapping */
+       MPTCP_MIB_DSSTCPMISMATCH,       /* DSS mapping did not match TCP's sequence numbers */
+       MPTCP_MIB_DATACSUMERR,          /* The data checksum failed */
        MPTCP_MIB_OFOQUEUETAIL, /* Segments inserted into OoO queue tail */
        MPTCP_MIB_OFOQUEUE,             /* Segments inserted into OoO queue */
        MPTCP_MIB_OFOMERGE,             /* Segments merged in OoO queue */
index f16d9b5..8f88dde 100644 (file)
@@ -144,6 +144,7 @@ static void mptcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
        info->mptcpi_write_seq = READ_ONCE(msk->write_seq);
        info->mptcpi_snd_una = READ_ONCE(msk->snd_una);
        info->mptcpi_rcv_nxt = READ_ONCE(msk->ack_seq);
+       info->mptcpi_csum_enabled = READ_ONCE(msk->csum_enabled);
        unlock_sock_fast(sk, slow);
 }
 
index 6b825fb..a052709 100644 (file)
@@ -44,7 +44,20 @@ static void mptcp_parse_option(const struct sk_buff *skb,
                        else
                                expected_opsize = TCPOLEN_MPTCP_MPC_SYN;
                }
-               if (opsize != expected_opsize)
+
+               /* Cf. RFC 8684, Section 3.3.0:
+                * "If a checksum is present but its use had not been negotiated
+                * in the MP_CAPABLE handshake, the receiver MUST close the
+                * subflow with a RST, as it is not behaving as negotiated.
+                * If a checksum is not present when its use has been negotiated,
+                * the receiver MUST close the subflow with a RST, as it is
+                * considered broken."
+                * We even parse options with a mismatching csum presence, so
+                * that later in subflow_data_ready() we can trigger the reset.
+                */
+               if (opsize != expected_opsize &&
+                   (expected_opsize != TCPOLEN_MPTCP_MPC_ACK_DATA ||
+                    opsize != TCPOLEN_MPTCP_MPC_ACK_DATA_CSUM))
                        break;
 
                /* try to be gentle vs future versions on the initial syn */
@@ -66,16 +79,12 @@ static void mptcp_parse_option(const struct sk_buff *skb,
                 * host requires the use of checksums, checksums MUST be used.
                 * In other words, the only way for checksums not to be used
                 * is if both hosts in their SYNs set A=0."
-                *
-                * Section 3.3.0:
-                * "If a checksum is not present when its use has been
-                * negotiated, the receiver MUST close the subflow with a RST as
-                * it is considered broken."
-                *
-                * We don't implement DSS checksum - fall back to TCP.
                 */
                if (flags & MPTCP_CAP_CHECKSUM_REQD)
-                       break;
+                       mp_opt->csum_reqd = 1;
+
+               if (flags & MPTCP_CAP_DENY_JOIN_ID0)
+                       mp_opt->deny_join_id0 = 1;
 
                mp_opt->mp_capable = 1;
                if (opsize >= TCPOLEN_MPTCP_MPC_SYNACK) {
@@ -86,7 +95,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
                        mp_opt->rcvr_key = get_unaligned_be64(ptr);
                        ptr += 8;
                }
-               if (opsize == TCPOLEN_MPTCP_MPC_ACK_DATA) {
+               if (opsize >= TCPOLEN_MPTCP_MPC_ACK_DATA) {
                        /* Section 3.1.:
                         * "the data parameters in a MP_CAPABLE are semantically
                         * equivalent to those in a DSS option and can be used
@@ -98,9 +107,14 @@ static void mptcp_parse_option(const struct sk_buff *skb,
                        mp_opt->data_len = get_unaligned_be16(ptr);
                        ptr += 2;
                }
-               pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d",
+               if (opsize == TCPOLEN_MPTCP_MPC_ACK_DATA_CSUM) {
+                       mp_opt->csum = (__force __sum16)get_unaligned_be16(ptr);
+                       mp_opt->csum_reqd = 1;
+                       ptr += 2;
+               }
+               pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u",
                         version, flags, opsize, mp_opt->sndr_key,
-                        mp_opt->rcvr_key, mp_opt->data_len);
+                        mp_opt->rcvr_key, mp_opt->data_len, mp_opt->csum);
                break;
 
        case MPTCPOPT_MP_JOIN:
@@ -171,10 +185,8 @@ static void mptcp_parse_option(const struct sk_buff *skb,
                                expected_opsize += TCPOLEN_MPTCP_DSS_MAP32;
                }
 
-               /* RFC 6824, Section 3.3:
-                * If a checksum is present, but its use had
-                * not been negotiated in the MP_CAPABLE handshake,
-                * the checksum field MUST be ignored.
+               /* Always parse any csum presence combination; we will enforce
+                * the RFC 8684 Section 3.3.0 checks later, in subflow_data_ready()
                 */
                if (opsize != expected_opsize &&
                    opsize != expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM)
@@ -209,9 +221,15 @@ static void mptcp_parse_option(const struct sk_buff *skb,
                        mp_opt->data_len = get_unaligned_be16(ptr);
                        ptr += 2;
 
-                       pr_debug("data_seq=%llu subflow_seq=%u data_len=%u",
+                       if (opsize == expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM) {
+                               mp_opt->csum_reqd = 1;
+                               mp_opt->csum = (__force __sum16)get_unaligned_be16(ptr);
+                               ptr += 2;
+                       }
+
+                       pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
                                 mp_opt->data_seq, mp_opt->subflow_seq,
-                                mp_opt->data_len);
+                                mp_opt->data_len, mp_opt->csum_reqd, mp_opt->csum);
                }
 
                break;
@@ -323,9 +341,12 @@ static void mptcp_parse_option(const struct sk_buff *skb,
        }
 }
 
-void mptcp_get_options(const struct sk_buff *skb,
+void mptcp_get_options(const struct sock *sk,
+                      const struct sk_buff *skb,
                       struct mptcp_options_received *mp_opt)
 {
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+       struct mptcp_sock *msk = mptcp_sk(subflow->conn);
        const struct tcphdr *th = tcp_hdr(skb);
        const unsigned char *ptr;
        int length;
@@ -341,6 +362,8 @@ void mptcp_get_options(const struct sk_buff *skb,
        mp_opt->dss = 0;
        mp_opt->mp_prio = 0;
        mp_opt->reset = 0;
+       mp_opt->csum_reqd = READ_ONCE(msk->csum_enabled);
+       mp_opt->deny_join_id0 = 0;
 
        length = (th->doff * 4) - sizeof(struct tcphdr);
        ptr = (const unsigned char *)(th + 1);
@@ -356,6 +379,8 @@ void mptcp_get_options(const struct sk_buff *skb,
                        length--;
                        continue;
                default:
+                       if (length < 2)
+                               return;
                        opsize = *ptr++;
                        if (opsize < 2) /* "silly options" */
                                return;
@@ -380,6 +405,8 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
        subflow->snd_isn = TCP_SKB_CB(skb)->end_seq;
        if (subflow->request_mptcp) {
                opts->suboptions = OPTION_MPTCP_MPC_SYN;
+               opts->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk));
+               opts->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk));
                *size = TCPOLEN_MPTCP_MPC_SYN;
                return true;
        } else if (subflow->request_join) {
@@ -435,8 +462,10 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
                                         struct mptcp_out_options *opts)
 {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+       struct mptcp_sock *msk = mptcp_sk(subflow->conn);
        struct mptcp_ext *mpext;
        unsigned int data_len;
+       u8 len;
 
        /* When skb is not available, we better over-estimate the emitted
         * options len. A full DSS option (28 bytes) is longer than
@@ -465,16 +494,27 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
                opts->suboptions = OPTION_MPTCP_MPC_ACK;
                opts->sndr_key = subflow->local_key;
                opts->rcvr_key = subflow->remote_key;
+               opts->csum_reqd = READ_ONCE(msk->csum_enabled);
+               opts->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk));
 
                /* Section 3.1.
                 * The MP_CAPABLE option is carried on the SYN, SYN/ACK, and ACK
                 * packets that start the first subflow of an MPTCP connection,
                 * as well as the first packet that carries data
                 */
-               if (data_len > 0)
-                       *size = ALIGN(TCPOLEN_MPTCP_MPC_ACK_DATA, 4);
-               else
+               if (data_len > 0) {
+                       len = TCPOLEN_MPTCP_MPC_ACK_DATA;
+                       if (opts->csum_reqd) {
+                               /* we need to propagate more info to csum the pseudo hdr */
+                               opts->ext_copy.data_seq = mpext->data_seq;
+                               opts->ext_copy.subflow_seq = mpext->subflow_seq;
+                               opts->ext_copy.csum = mpext->csum;
+                               len += TCPOLEN_MPTCP_DSS_CHECKSUM;
+                       }
+                       *size = ALIGN(len, 4);
+               } else {
                        *size = TCPOLEN_MPTCP_MPC_ACK;
+               }
 
                pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d",
                         subflow, subflow->local_key, subflow->remote_key,
@@ -535,18 +575,21 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
        bool ret = false;
        u64 ack_seq;
 
+       opts->csum_reqd = READ_ONCE(msk->csum_enabled);
        mpext = skb ? mptcp_get_ext(skb) : NULL;
 
        if (!skb || (mpext && mpext->use_map) || snd_data_fin_enable) {
-               unsigned int map_size;
+               unsigned int map_size = TCPOLEN_MPTCP_DSS_BASE + TCPOLEN_MPTCP_DSS_MAP64;
 
-               map_size = TCPOLEN_MPTCP_DSS_BASE + TCPOLEN_MPTCP_DSS_MAP64;
+               if (mpext) {
+                       if (opts->csum_reqd)
+                               map_size += TCPOLEN_MPTCP_DSS_CHECKSUM;
 
-               remaining -= map_size;
-               dss_size = map_size;
-               if (mpext)
                        opts->ext_copy = *mpext;
+               }
 
+               remaining -= map_size;
+               dss_size = map_size;
                if (skb && snd_data_fin_enable)
                        mptcp_write_data_fin(subflow, skb, &opts->ext_copy);
                ret = true;
@@ -789,6 +832,8 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
        if (subflow_req->mp_capable) {
                opts->suboptions = OPTION_MPTCP_MPC_SYNACK;
                opts->sndr_key = subflow_req->local_key;
+               opts->csum_reqd = subflow_req->csum_reqd;
+               opts->allow_join_id0 = subflow_req->allow_join_id0;
                *size = TCPOLEN_MPTCP_MPC_SYNACK;
                pr_debug("subflow_req=%p, local_key=%llu",
                         subflow_req, subflow_req->local_key);
@@ -867,6 +912,9 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
                return false;
        }
 
+       if (mp_opt->deny_join_id0)
+               WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
+
        if (unlikely(!READ_ONCE(msk->pm.server_side)))
                pr_warn_once("bogus mpc option on established client sk");
        mptcp_subflow_fully_established(subflow, mp_opt);
@@ -1007,7 +1055,7 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
                return;
        }
 
-       mptcp_get_options(skb, &mp_opt);
+       mptcp_get_options(sk, skb, &mp_opt);
        if (!check_fully_established(msk, sk, subflow, skb, &mp_opt))
                return;
 
@@ -1099,6 +1147,10 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
                }
                mpext->data_len = mp_opt.data_len;
                mpext->use_map = 1;
+               mpext->csum_reqd = mp_opt.csum_reqd;
+
+               if (mpext->csum_reqd)
+                       mpext->csum = mp_opt.csum;
        }
 }
 
@@ -1118,25 +1170,53 @@ static void mptcp_set_rwin(const struct tcp_sock *tp)
                WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
 }
 
+static u16 mptcp_make_csum(const struct mptcp_ext *mpext)
+{
+       struct csum_pseudo_header header;
+       __wsum csum;
+
+       /* Cf. RFC 8684, Section 3.3.1:
+        * the data sequence number used in the pseudo-header is
+        * always the 64-bit value, irrespective of what length is used in the
+        * DSS option itself.
+        */
+       header.data_seq = cpu_to_be64(mpext->data_seq);
+       header.subflow_seq = htonl(mpext->subflow_seq);
+       header.data_len = htons(mpext->data_len);
+       header.csum = 0;
+
+       csum = csum_partial(&header, sizeof(header), ~csum_unfold(mpext->csum));
+       return (__force u16)csum_fold(csum);
+}
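
mptcp_make_csum() folds the RFC 8684 pseudo-header on top of an already accumulated payload checksum. A standalone sketch of the same computation with explicit inputs (the function name is illustrative):

static u16 dss_csum_sketch(u64 data_seq, u32 subflow_seq, u16 data_len,
			   __wsum data_csum)
{
	struct csum_pseudo_header hdr = {
		.data_seq    = cpu_to_be64(data_seq),	/* always the 64-bit value */
		.subflow_seq = htonl(subflow_seq),
		.data_len    = htons(data_len),
		.csum        = 0,
	};

	/* fold the pseudo-header into the payload checksum */
	return (__force u16)csum_fold(csum_partial(&hdr, sizeof(hdr), data_csum));
}
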
+
 void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
                         struct mptcp_out_options *opts)
 {
        if ((OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK |
             OPTION_MPTCP_MPC_ACK) & opts->suboptions) {
-               u8 len;
+               u8 len, flag = MPTCP_CAP_HMAC_SHA256;
 
-               if (OPTION_MPTCP_MPC_SYN & opts->suboptions)
+               if (OPTION_MPTCP_MPC_SYN & opts->suboptions) {
                        len = TCPOLEN_MPTCP_MPC_SYN;
-               else if (OPTION_MPTCP_MPC_SYNACK & opts->suboptions)
+               } else if (OPTION_MPTCP_MPC_SYNACK & opts->suboptions) {
                        len = TCPOLEN_MPTCP_MPC_SYNACK;
-               else if (opts->ext_copy.data_len)
+               } else if (opts->ext_copy.data_len) {
                        len = TCPOLEN_MPTCP_MPC_ACK_DATA;
-               else
+                       if (opts->csum_reqd)
+                               len += TCPOLEN_MPTCP_DSS_CHECKSUM;
+               } else {
                        len = TCPOLEN_MPTCP_MPC_ACK;
+               }
+
+               if (opts->csum_reqd)
+                       flag |= MPTCP_CAP_CHECKSUM_REQD;
+
+               if (!opts->allow_join_id0)
+                       flag |= MPTCP_CAP_DENY_JOIN_ID0;
 
                *ptr++ = mptcp_option(MPTCPOPT_MP_CAPABLE, len,
                                      MPTCP_SUPPORTED_VERSION,
-                                     MPTCP_CAP_HMAC_SHA256);
+                                     flag);
 
                if (!((OPTION_MPTCP_MPC_SYNACK | OPTION_MPTCP_MPC_ACK) &
                    opts->suboptions))
@@ -1152,8 +1232,13 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
                if (!opts->ext_copy.data_len)
                        goto mp_capable_done;
 
-               put_unaligned_be32(opts->ext_copy.data_len << 16 |
-                                  TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
+               if (opts->csum_reqd) {
+                       put_unaligned_be32(opts->ext_copy.data_len << 16 |
+                                          mptcp_make_csum(&opts->ext_copy), ptr);
+               } else {
+                       put_unaligned_be32(opts->ext_copy.data_len << 16 |
+                                          TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
+               }
                ptr += 1;
        }
 
@@ -1305,6 +1390,9 @@ mp_capable_done:
                        flags |= MPTCP_DSS_HAS_MAP | MPTCP_DSS_DSN64;
                        if (mpext->data_fin)
                                flags |= MPTCP_DSS_DATA_FIN;
+
+                       if (opts->csum_reqd)
+                               len += TCPOLEN_MPTCP_DSS_CHECKSUM;
                }
 
                *ptr++ = mptcp_option(MPTCPOPT_DSS, len, 0, flags);
@@ -1324,8 +1412,13 @@ mp_capable_done:
                        ptr += 2;
                        put_unaligned_be32(mpext->subflow_seq, ptr);
                        ptr += 1;
-                       put_unaligned_be32(mpext->data_len << 16 |
-                                          TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
+                       if (opts->csum_reqd) {
+                               put_unaligned_be32(mpext->data_len << 16 |
+                                                  mptcp_make_csum(mpext), ptr);
+                       } else {
+                               put_unaligned_be32(mpext->data_len << 16 |
+                                                  TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
+                       }
                }
        }
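
When checksums are negotiated, the option grows by TCPOLEN_MPTCP_DSS_CHECKSUM and the checksum occupies the 16 bits that were previously two NOP pad bytes. A sketch of the trailing 32-bit word in both layouts (fragment; data_len and csum as in the code above):

/* last word of the DSS mapping:
 *   plain:         [ data_len:16 | TCPOPT_NOP:8 | TCPOPT_NOP:8 ]
 *   with checksum: [ data_len:16 | csum:16                     ]
 */
put_unaligned_be32(data_len << 16 | csum, ptr);
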
 
index 9d00fa6..639271e 100644 (file)
@@ -320,6 +320,7 @@ void mptcp_pm_data_init(struct mptcp_sock *msk)
        WRITE_ONCE(msk->pm.addr_signal, 0);
        WRITE_ONCE(msk->pm.accept_addr, false);
        WRITE_ONCE(msk->pm.accept_subflow, false);
+       WRITE_ONCE(msk->pm.remote_deny_join_id0, false);
        msk->pm.status = 0;
 
        spin_lock_init(&msk->pm.lock);
index 0972259..d2591eb 100644 (file)
@@ -451,7 +451,8 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
 
        /* check if should create a new subflow */
        if (msk->pm.local_addr_used < local_addr_max &&
-           msk->pm.subflows < subflows_max) {
+           msk->pm.subflows < subflows_max &&
+           !READ_ONCE(msk->pm.remote_deny_join_id0)) {
                local = select_local_address(pernet, msk);
                if (local) {
                        struct mptcp_addr_info remote = { 0 };
@@ -540,6 +541,7 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
        subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
        if (subflow) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+               bool slow;
 
                spin_unlock_bh(&msk->pm.lock);
                pr_debug("send ack for %s%s%s",
@@ -547,9 +549,9 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
                         mptcp_pm_should_add_signal_ipv6(msk) ? " [ipv6]" : "",
                         mptcp_pm_should_add_signal_port(msk) ? " [port]" : "");
 
-               lock_sock(ssk);
+               slow = lock_sock_fast(ssk);
                tcp_send_ack(ssk);
-               release_sock(ssk);
+               unlock_sock_fast(ssk, slow);
                spin_lock_bh(&msk->pm.lock);
        }
 }
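
The hunks above (and similar ones in protocol.c) convert ack-sending paths from lock_sock() to the cheaper lock_sock_fast(); the returned flag records whether the slow path had to be taken and must be handed back to unlock_sock_fast():

bool slow = lock_sock_fast(ssk);	/* true when the slow path was taken */

tcp_send_ack(ssk);
unlock_sock_fast(ssk, slow);
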
@@ -566,6 +568,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
                struct sock *sk = (struct sock *)msk;
                struct mptcp_addr_info local;
+               bool slow;
 
                local_address((struct sock_common *)ssk, &local);
                if (!addresses_equal(&local, addr, addr->port))
@@ -578,9 +581,9 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
 
                spin_unlock_bh(&msk->pm.lock);
                pr_debug("send ack for mp_prio");
-               lock_sock(ssk);
+               slow = lock_sock_fast(ssk);
                tcp_send_ack(ssk);
-               release_sock(ssk);
+               unlock_sock_fast(ssk, slow);
                spin_lock_bh(&msk->pm.lock);
 
                return 0;
index 9930950..7bb8242 100644 (file)
@@ -286,11 +286,13 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
 
        /* try to fetch required memory from subflow */
        if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
-               if (ssk->sk_forward_alloc < skb->truesize)
-                       goto drop;
-               __sk_mem_reclaim(ssk, skb->truesize);
-               if (!sk_rmem_schedule(sk, skb, skb->truesize))
+               int amount = sk_mem_pages(skb->truesize) << SK_MEM_QUANTUM_SHIFT;
+
+               if (ssk->sk_forward_alloc < amount)
                        goto drop;
+
+               ssk->sk_forward_alloc -= amount;
+               sk->sk_forward_alloc += amount;
        }
 
        has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
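
Rather than reclaiming pages from the subflow and re-charging the msk, the hunk above moves whole accounting quanta straight between the two forward-allocation counters. A worked instance, assuming 4 KiB pages:

/* skb->truesize = 3000: sk_mem_pages() rounds up to one quantum,
 * so exactly 4096 bytes migrate from the subflow socket to the msk
 */
int amount = sk_mem_pages(skb->truesize) << SK_MEM_QUANTUM_SHIFT;

ssk->sk_forward_alloc -= amount;
sk->sk_forward_alloc += amount;
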
@@ -431,56 +433,55 @@ static void mptcp_send_ack(struct mptcp_sock *msk)
 
        mptcp_for_each_subflow(msk, subflow) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+               bool slow;
 
-               lock_sock(ssk);
+               slow = lock_sock_fast(ssk);
                if (tcp_can_send_ack(ssk))
                        tcp_send_ack(ssk);
-               release_sock(ssk);
+               unlock_sock_fast(ssk, slow);
        }
 }
 
-static bool mptcp_subflow_cleanup_rbuf(struct sock *ssk)
+static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
 {
-       int ret;
+       bool slow;
 
-       lock_sock(ssk);
-       ret = tcp_can_send_ack(ssk);
-       if (ret)
+       slow = lock_sock_fast(ssk);
+       if (tcp_can_send_ack(ssk))
                tcp_cleanup_rbuf(ssk, 1);
-       release_sock(ssk);
-       return ret;
+       unlock_sock_fast(ssk, slow);
+}
+
+static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
+{
+       const struct inet_connection_sock *icsk = inet_csk(ssk);
+       u8 ack_pending = READ_ONCE(icsk->icsk_ack.pending);
+       const struct tcp_sock *tp = tcp_sk(ssk);
+
+       return (ack_pending & ICSK_ACK_SCHED) &&
+               ((READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->rcv_wup) >
+                 READ_ONCE(icsk->icsk_ack.rcv_mss)) ||
+                (rx_empty && ack_pending &
+                             (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
 }
 
 static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
 {
-       struct sock *ack_hint = READ_ONCE(msk->ack_hint);
        int old_space = READ_ONCE(msk->old_wspace);
        struct mptcp_subflow_context *subflow;
        struct sock *sk = (struct sock *)msk;
-       bool cleanup;
+       int space =  __mptcp_space(sk);
+       bool cleanup, rx_empty;
 
-       /* this is a simple superset of what tcp_cleanup_rbuf() implements
-        * so that we don't have to acquire the ssk socket lock most of the time
-        * to do actually nothing
-        */
-       cleanup = __mptcp_space(sk) - old_space >= max(0, old_space);
-       if (!cleanup)
-               return;
+       cleanup = (space > 0) && (space >= (old_space << 1));
+       rx_empty = !atomic_read(&sk->sk_rmem_alloc);
 
-       /* if the hinted ssk is still active, try to use it */
-       if (likely(ack_hint)) {
-               mptcp_for_each_subflow(msk, subflow) {
-                       struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+       mptcp_for_each_subflow(msk, subflow) {
+               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 
-                       if (ack_hint == ssk && mptcp_subflow_cleanup_rbuf(ssk))
-                               return;
-               }
+               if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
+                       mptcp_subflow_cleanup_rbuf(ssk);
        }
-
-       /* otherwise pick the first active subflow */
-       mptcp_for_each_subflow(msk, subflow)
-               if (mptcp_subflow_cleanup_rbuf(mptcp_subflow_tcp_sock(subflow)))
-                       return;
 }
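
The rewritten mptcp_cleanup_rbuf() drops the ack-hint bookkeeping in favour of a doubling heuristic on the advertised space; a worked instance of the trigger condition:

/* e.g. old_wspace = 16 KiB and current space = 40 KiB:
 * 40960 >= (16384 << 1), so a window update is worth sending
 */
cleanup = (space > 0) && (space >= (old_space << 1));
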
 
 static bool mptcp_check_data_fin(struct sock *sk)
@@ -625,7 +626,6 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
                        break;
                }
        } while (more_data_avail);
-       WRITE_ONCE(msk->ack_hint, ssk);
 
        *bytes += moved;
        return done;
@@ -677,18 +677,19 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
 /* In most cases we will be able to lock the mptcp socket.  If it's already
  * owned, we need to defer to the work queue to avoid ABBA deadlock.
  */
-static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
+static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
 {
        struct sock *sk = (struct sock *)msk;
        unsigned int moved = 0;
 
-       if (inet_sk_state_load(sk) == TCP_CLOSE)
-               return;
-
-       mptcp_data_lock(sk);
-
        __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
        __mptcp_ofo_queue(msk);
+       if (unlikely(ssk->sk_err)) {
+               if (!sock_owned_by_user(sk))
+                       __mptcp_error_report(sk);
+               else
+                       set_bit(MPTCP_ERROR_REPORT,  &msk->flags);
+       }
 
        /* If the moves have caught up with the DATA_FIN sequence number
         * it's time to ack the DATA_FIN and change socket state, but
@@ -697,7 +698,7 @@ static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
         */
        if (mptcp_pending_data_fin(sk, NULL))
                mptcp_schedule_work(sk);
-       mptcp_data_unlock(sk);
+       return moved > 0;
 }
 
 void mptcp_data_ready(struct sock *sk, struct sock *ssk)
@@ -705,7 +706,6 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        struct mptcp_sock *msk = mptcp_sk(sk);
        int sk_rbuf, ssk_rbuf;
-       bool wake;
 
        /* The peer can send data while we are shutting down this
         * subflow at msk destruction time, but we must avoid enqueuing
@@ -714,28 +714,22 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
        if (unlikely(subflow->disposable))
                return;
 
-       /* move_skbs_to_msk below can legitly clear the data_avail flag,
-        * but we will need later to properly woke the reader, cache its
-        * value
-        */
-       wake = subflow->data_avail == MPTCP_SUBFLOW_DATA_AVAIL;
-       if (wake)
-               set_bit(MPTCP_DATA_READY, &msk->flags);
-
        ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
        sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
        if (unlikely(ssk_rbuf > sk_rbuf))
                sk_rbuf = ssk_rbuf;
 
-       /* over limit? can't append more skbs to msk */
+       /* over limit? can't append more skbs to msk; also, no need to wake up */
        if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf)
-               goto wake;
-
-       move_skbs_to_msk(msk, ssk);
+               return;
 
-wake:
-       if (wake)
+       /* Wake up the reader only for in-sequence data */
+       mptcp_data_lock(sk);
+       if (move_skbs_to_msk(msk, ssk)) {
+               set_bit(MPTCP_DATA_READY, &msk->flags);
                sk->sk_data_ready(sk);
+       }
+       mptcp_data_unlock(sk);
 }
 
 static bool mptcp_do_flush_join_list(struct mptcp_sock *msk)
@@ -867,7 +861,7 @@ static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
        sock_owned_by_me(sk);
 
        mptcp_for_each_subflow(msk, subflow) {
-               if (subflow->data_avail)
+               if (READ_ONCE(subflow->data_avail))
                        return mptcp_subflow_tcp_sock(subflow);
        }
 
@@ -903,22 +897,14 @@ static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
                df->data_seq + df->data_len == msk->write_seq;
 }
 
-static int mptcp_wmem_with_overhead(struct sock *sk, int size)
+static int mptcp_wmem_with_overhead(int size)
 {
-       struct mptcp_sock *msk = mptcp_sk(sk);
-       int ret, skbs;
-
-       ret = size + ((sizeof(struct mptcp_data_frag) * size) >> PAGE_SHIFT);
-       skbs = (msk->tx_pending_data + size) / msk->size_goal_cache;
-       if (skbs < msk->skb_tx_cache.qlen)
-               return ret;
-
-       return ret + (skbs - msk->skb_tx_cache.qlen) * SKB_TRUESIZE(MAX_TCP_HEADER);
+       return size + ((sizeof(struct mptcp_data_frag) * size) >> PAGE_SHIFT);
 }
 
 static void __mptcp_wmem_reserve(struct sock *sk, int size)
 {
-       int amount = mptcp_wmem_with_overhead(sk, size);
+       int amount = mptcp_wmem_with_overhead(size);
        struct mptcp_sock *msk = mptcp_sk(sk);
 
        WARN_ON_ONCE(msk->wmem_reserved);
@@ -1213,49 +1199,8 @@ static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp)
        return NULL;
 }
 
-static bool mptcp_tx_cache_refill(struct sock *sk, int size,
-                                 struct sk_buff_head *skbs, int *total_ts)
-{
-       struct mptcp_sock *msk = mptcp_sk(sk);
-       struct sk_buff *skb;
-       int space_needed;
-
-       if (unlikely(tcp_under_memory_pressure(sk))) {
-               mptcp_mem_reclaim_partial(sk);
-
-               /* under pressure pre-allocate at most a single skb */
-               if (msk->skb_tx_cache.qlen)
-                       return true;
-               space_needed = msk->size_goal_cache;
-       } else {
-               space_needed = msk->tx_pending_data + size -
-                              msk->skb_tx_cache.qlen * msk->size_goal_cache;
-       }
-
-       while (space_needed > 0) {
-               skb = __mptcp_do_alloc_tx_skb(sk, sk->sk_allocation);
-               if (unlikely(!skb)) {
-                       /* under memory pressure, try to pass the caller a
-                        * single skb to allow forward progress
-                        */
-                       while (skbs->qlen > 1) {
-                               skb = __skb_dequeue_tail(skbs);
-                               *total_ts -= skb->truesize;
-                               __kfree_skb(skb);
-                       }
-                       return skbs->qlen > 0;
-               }
-
-               *total_ts += skb->truesize;
-               __skb_queue_tail(skbs, skb);
-               space_needed -= msk->size_goal_cache;
-       }
-       return true;
-}
-
 static bool __mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
 {
-       struct mptcp_sock *msk = mptcp_sk(sk);
        struct sk_buff *skb;
 
        if (ssk->sk_tx_skb_cache) {
@@ -1266,22 +1211,6 @@ static bool __mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
                return true;
        }
 
-       skb = skb_peek(&msk->skb_tx_cache);
-       if (skb) {
-               if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
-                       skb = __skb_dequeue(&msk->skb_tx_cache);
-                       if (WARN_ON_ONCE(!skb))
-                               return false;
-
-                       mptcp_wmem_uncharge(sk, skb->truesize);
-                       ssk->sk_tx_skb_cache = skb;
-                       return true;
-               }
-
-               /* over memory limit, no point to try to allocate a new skb */
-               return false;
-       }
-
        skb = __mptcp_do_alloc_tx_skb(sk, gfp);
        if (!skb)
                return false;
@@ -1297,7 +1226,6 @@ static bool __mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
 static bool mptcp_must_reclaim_memory(struct sock *sk, struct sock *ssk)
 {
        return !ssk->sk_tx_skb_cache &&
-              !skb_peek(&mptcp_sk(sk)->skb_tx_cache) &&
               tcp_under_memory_pressure(sk);
 }
 
@@ -1308,6 +1236,18 @@ static bool mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk)
        return __mptcp_alloc_tx_skb(sk, ssk, sk->sk_allocation);
 }
 
+/* note: this always recomputes the csum on the whole skb, even
+ * if we just appended a single frag. More status info is needed
+ */
+static void mptcp_update_data_checksum(struct sk_buff *skb, int added)
+{
+       struct mptcp_ext *mpext = mptcp_get_ext(skb);
+       __wsum csum = ~csum_unfold(mpext->csum);
+       int offset = skb->len - added;
+
+       mpext->csum = csum_fold(csum_block_add(csum, skb_checksum(skb, offset, added, 0), offset));
+}
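
mptcp_update_data_checksum() extends the running DSS checksum incrementally instead of re-walking all mapped data. The offset passed to csum_block_add() matters because the Internet checksum byte-swaps a partial sum merged at an odd offset; one update step, sketched:

__wsum csum = ~csum_unfold(mpext->csum);	/* undo the previous fold */

/* checksum only the newly appended bytes, merged at their offset */
csum = csum_block_add(csum, skb_checksum(skb, offset, added, 0), offset);
mpext->csum = csum_fold(csum);			/* back to 16 bits */
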
+
 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
                              struct mptcp_data_frag *dfrag,
                              struct mptcp_sendmsg_info *info)
@@ -1328,7 +1268,6 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
        /* compute send limit */
        info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
        avail_size = info->size_goal;
-       msk->size_goal_cache = info->size_goal;
        skb = tcp_write_queue_tail(ssk);
        if (skb) {
                /* Limit the write to the size available in the
@@ -1402,10 +1341,14 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
        if (zero_window_probe) {
                mptcp_subflow_ctx(ssk)->rel_write_seq += ret;
                mpext->frozen = 1;
-               ret = 0;
+               if (READ_ONCE(msk->csum_enabled))
+                       mptcp_update_data_checksum(tail, ret);
                tcp_push_pending_frames(ssk);
+               return 0;
        }
 out:
+       if (READ_ONCE(msk->csum_enabled))
+               mptcp_update_data_checksum(tail, ret);
        mptcp_subflow_ctx(ssk)->rel_write_seq += ret;
        return ret;
 }
@@ -1673,7 +1616,6 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        while (msg_data_left(msg)) {
                int total_ts, frag_truesize = 0;
                struct mptcp_data_frag *dfrag;
-               struct sk_buff_head skbs;
                bool dfrag_collapsed;
                size_t psize, offset;
 
@@ -1706,16 +1648,10 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                psize = pfrag->size - offset;
                psize = min_t(size_t, psize, msg_data_left(msg));
                total_ts = psize + frag_truesize;
-               __skb_queue_head_init(&skbs);
-               if (!mptcp_tx_cache_refill(sk, psize, &skbs, &total_ts))
-                       goto wait_for_memory;
 
-               if (!mptcp_wmem_alloc(sk, total_ts)) {
-                       __skb_queue_purge(&skbs);
+               if (!mptcp_wmem_alloc(sk, total_ts))
                        goto wait_for_memory;
-               }
 
-               skb_queue_splice_tail(&skbs, &msk->skb_tx_cache);
                if (copy_page_from_iter(dfrag->page, offset, psize,
                                        &msg->msg_iter) != psize) {
                        mptcp_wmem_uncharge(sk, psize + frag_truesize);
@@ -1772,7 +1708,7 @@ static void mptcp_wait_data(struct sock *sk, long *timeo)
        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 
        sk_wait_event(sk, timeo,
-                     test_and_clear_bit(MPTCP_DATA_READY, &msk->flags), &wait);
+                     test_bit(MPTCP_DATA_READY, &msk->flags), &wait);
 
        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        remove_wait_queue(sk_sleep(sk), &wait);
@@ -1970,7 +1906,9 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk)
                __mptcp_update_rmem(sk);
                done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
                mptcp_data_unlock(sk);
-               tcp_cleanup_rbuf(ssk, moved);
+
+               if (unlikely(ssk->sk_err))
+                       __mptcp_error_report(sk);
                unlock_sock_fast(ssk, slowpath);
        } while (!done);
 
@@ -1983,7 +1921,6 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk)
                ret |= __mptcp_ofo_queue(msk);
                __mptcp_splice_receive_queue(sk);
                mptcp_data_unlock(sk);
-               mptcp_cleanup_rbuf(msk);
        }
        if (ret)
                mptcp_check_data_fin((struct sock *)msk);
@@ -2093,10 +2030,8 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                 */
                if (unlikely(__mptcp_move_skbs(msk)))
                        set_bit(MPTCP_DATA_READY, &msk->flags);
-       } else if (unlikely(!test_bit(MPTCP_DATA_READY, &msk->flags))) {
-               /* data to read but mptcp_wait_data() cleared DATA_READY */
-               set_bit(MPTCP_DATA_READY, &msk->flags);
        }
+
 out_err:
        if (cmsg_flags && copied >= 0) {
                if (cmsg_flags & MPTCP_CMSG_TS)
@@ -2234,9 +2169,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
        if (ssk == msk->last_snd)
                msk->last_snd = NULL;
 
-       if (ssk == msk->ack_hint)
-               msk->ack_hint = NULL;
-
        if (ssk == msk->first)
                msk->first = NULL;
 
@@ -2308,13 +2240,14 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
 
        list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
                struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
+               bool slow;
 
-               lock_sock(tcp_sk);
+               slow = lock_sock_fast(tcp_sk);
                if (tcp_sk->sk_state != TCP_CLOSE) {
                        tcp_send_active_reset(tcp_sk, GFP_ATOMIC);
                        tcp_set_state(tcp_sk, TCP_CLOSE);
                }
-               release_sock(tcp_sk);
+               unlock_sock_fast(tcp_sk, slow);
        }
 
        inet_sk_state_store(sk, TCP_CLOSE);
@@ -2359,8 +2292,8 @@ static void __mptcp_retrans(struct sock *sk)
 
        /* limit retransmission to the bytes already sent on some subflows */
        info.sent = 0;
-       info.limit = dfrag->already_sent;
-       while (info.sent < dfrag->already_sent) {
+       info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len : dfrag->already_sent;
+       while (info.sent < info.limit) {
                if (!mptcp_alloc_tx_skb(sk, ssk))
                        break;
 
@@ -2372,9 +2305,11 @@ static void __mptcp_retrans(struct sock *sk)
                copied += ret;
                info.sent += ret;
        }
-       if (copied)
+       if (copied) {
+               dfrag->already_sent = max(dfrag->already_sent, info.sent);
                tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
                         info.size_goal);
+       }
 
        mptcp_set_timeout(sk, ssk);
        release_sock(ssk);
@@ -2442,17 +2377,15 @@ static int __mptcp_init_sock(struct sock *sk)
        INIT_LIST_HEAD(&msk->rtx_queue);
        INIT_WORK(&msk->work, mptcp_worker);
        __skb_queue_head_init(&msk->receive_queue);
-       __skb_queue_head_init(&msk->skb_tx_cache);
        msk->out_of_order_queue = RB_ROOT;
        msk->first_pending = NULL;
        msk->wmem_reserved = 0;
        msk->rmem_released = 0;
        msk->tx_pending_data = 0;
-       msk->size_goal_cache = TCP_BASE_MSS;
 
-       msk->ack_hint = NULL;
        msk->first = NULL;
        inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
+       WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
 
        mptcp_pm_data_init(msk);
 
@@ -2504,15 +2437,10 @@ static void __mptcp_clear_xmit(struct sock *sk)
 {
        struct mptcp_sock *msk = mptcp_sk(sk);
        struct mptcp_data_frag *dtmp, *dfrag;
-       struct sk_buff *skb;
 
        WRITE_ONCE(msk->first_pending, NULL);
        list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
                dfrag_clear(sk, dfrag);
-       while ((skb = __skb_dequeue(&msk->skb_tx_cache)) != NULL) {
-               sk->sk_forward_alloc += skb->truesize;
-               kfree_skb(skb);
-       }
 }
 
 static void mptcp_cancel_work(struct sock *sk)
@@ -2793,6 +2721,8 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
        msk->token = subflow_req->token;
        msk->subflow = NULL;
        WRITE_ONCE(msk->fully_established, false);
+       if (mp_opt->csum_reqd)
+               WRITE_ONCE(msk->csum_enabled, true);
 
        msk->write_seq = subflow_req->idsn + 1;
        msk->snd_nxt = msk->write_seq;
index 89f6b73..d8ad327 100644 (file)
@@ -68,6 +68,8 @@
 #define TCPOLEN_MPTCP_FASTCLOSE                12
 #define TCPOLEN_MPTCP_RST              4
 
+#define TCPOLEN_MPTCP_MPC_ACK_DATA_CSUM        (TCPOLEN_MPTCP_DSS_CHECKSUM + TCPOLEN_MPTCP_MPC_ACK_DATA)
+
 /* MPTCP MP_JOIN flags */
 #define MPTCPOPT_BACKUP                BIT(0)
 #define MPTCPOPT_HMAC_LEN      20
@@ -77,8 +79,9 @@
 #define MPTCP_VERSION_MASK     (0x0F)
 #define MPTCP_CAP_CHECKSUM_REQD        BIT(7)
 #define MPTCP_CAP_EXTENSIBILITY        BIT(6)
+#define MPTCP_CAP_DENY_JOIN_ID0        BIT(5)
 #define MPTCP_CAP_HMAC_SHA256  BIT(0)
-#define MPTCP_CAP_FLAG_MASK    (0x3F)
+#define MPTCP_CAP_FLAG_MASK    (0x1F)
 
 /* MPTCP DSS flags */
 #define MPTCP_DSS_DATA_FIN     BIT(4)
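
With BIT(5) now assigned to deny-join-id0 and handled explicitly, the flag mask shrinks from 0x3F to 0x1F so a peer setting that bit is no longer rejected by flag validation. The resulting MP_CAPABLE flags-byte layout, as understood from this series:

/* bit 7  MPTCP_CAP_CHECKSUM_REQD   handled explicitly (csum_reqd)
 * bit 6  MPTCP_CAP_EXTENSIBILITY   still rejected when set
 * bit 5  MPTCP_CAP_DENY_JOIN_ID0   handled explicitly (deny_join_id0)
 * bits 4-1                         reserved, covered by MPTCP_CAP_FLAG_MASK
 * bit 0  MPTCP_CAP_HMAC_SHA256     the required crypto algorithm
 */
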
@@ -124,6 +127,7 @@ struct mptcp_options_received {
        u64     data_seq;
        u32     subflow_seq;
        u16     data_len;
+       __sum16 csum;
        u16     mp_capable : 1,
                mp_join : 1,
                fastclose : 1,
@@ -133,7 +137,9 @@ struct mptcp_options_received {
                rm_addr : 1,
                mp_prio : 1,
                echo : 1,
-               backup : 1;
+               csum_reqd : 1,
+               backup : 1,
+               deny_join_id0 : 1;
        u32     token;
        u32     nonce;
        u64     thmac;
@@ -188,6 +194,7 @@ struct mptcp_pm_data {
        bool            work_pending;
        bool            accept_addr;
        bool            accept_subflow;
+       bool            remote_deny_join_id0;
        u8              add_addr_signaled;
        u8              add_addr_accepted;
        u8              local_addr_used;
@@ -234,15 +241,13 @@ struct mptcp_sock {
        bool            snd_data_fin_enable;
        bool            rcv_fastclose;
        bool            use_64bit_ack; /* Set when we received a 64-bit DSN */
+       bool            csum_enabled;
        spinlock_t      join_list_lock;
-       struct sock     *ack_hint;
        struct work_struct work;
        struct sk_buff  *ooo_last_skb;
        struct rb_root  out_of_order_queue;
        struct sk_buff_head receive_queue;
-       struct sk_buff_head skb_tx_cache;       /* this is wmem accounted */
        int             tx_pending_data;
-       int             size_goal_cache;
        struct list_head conn_list;
        struct list_head rtx_queue;
        struct mptcp_data_frag *first_pending;
@@ -335,11 +340,20 @@ static inline struct mptcp_data_frag *mptcp_rtx_head(const struct sock *sk)
        return list_first_entry_or_null(&msk->rtx_queue, struct mptcp_data_frag, list);
 }
 
+struct csum_pseudo_header {
+       __be64 data_seq;
+       __be32 subflow_seq;
+       __be16 data_len;
+       __sum16 csum;
+};
+
 struct mptcp_subflow_request_sock {
        struct  tcp_request_sock sk;
        u16     mp_capable : 1,
                mp_join : 1,
-               backup : 1;
+               backup : 1,
+               csum_reqd : 1,
+               allow_join_id0 : 1;
        u8      local_id;
        u8      remote_id;
        u64     local_key;
@@ -362,7 +376,6 @@ mptcp_subflow_rsk(const struct request_sock *rsk)
 enum mptcp_data_avail {
        MPTCP_SUBFLOW_NODATA,
        MPTCP_SUBFLOW_DATA_AVAIL,
-       MPTCP_SUBFLOW_OOO_DATA
 };
 
 struct mptcp_delegated_action {
@@ -387,6 +400,8 @@ struct mptcp_subflow_context {
        u32     map_subflow_seq;
        u32     ssn_offset;
        u32     map_data_len;
+       __wsum  map_data_csum;
+       u32     map_csum_len;
        u32     request_mptcp : 1,  /* send MP_CAPABLE */
                request_join : 1,   /* send MP_JOIN */
                request_bkup : 1,
@@ -396,6 +411,8 @@ struct mptcp_subflow_context {
                pm_notified : 1,    /* PM hook called for established status */
                conn_finished : 1,
                map_valid : 1,
+               map_csum_reqd : 1,
+               map_data_fin : 1,
                mpc_map : 1,
                backup : 1,
                send_mp_prio : 1,
@@ -525,6 +542,8 @@ static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *su
 
 int mptcp_is_enabled(struct net *net);
 unsigned int mptcp_get_add_addr_timeout(struct net *net);
+int mptcp_is_checksum_enabled(struct net *net);
+int mptcp_allow_join_id0(struct net *net);
 void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
                                     struct mptcp_options_received *mp_opt);
 bool mptcp_subflow_data_available(struct sock *sk);
@@ -576,7 +595,8 @@ int __init mptcp_proto_v6_init(void);
 struct sock *mptcp_sk_clone(const struct sock *sk,
                            const struct mptcp_options_received *mp_opt,
                            struct request_sock *req);
-void mptcp_get_options(const struct sk_buff *skb,
+void mptcp_get_options(const struct sock *sk,
+                      const struct sk_buff *skb,
                       struct mptcp_options_received *mp_opt);
 
 void mptcp_finish_connect(struct sock *sk);
index 3395633..d55f4ef 100644 (file)
@@ -108,6 +108,8 @@ static void subflow_init_req(struct request_sock *req, const struct sock *sk_lis
 
        subflow_req->mp_capable = 0;
        subflow_req->mp_join = 0;
+       subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener));
+       subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener));
        subflow_req->msk = NULL;
        mptcp_token_init_request(req);
 }
@@ -150,7 +152,7 @@ static int subflow_check_req(struct request_sock *req,
                return -EINVAL;
 #endif
 
-       mptcp_get_options(skb, &mp_opt);
+       mptcp_get_options(sk_listener, skb, &mp_opt);
 
        if (mp_opt.mp_capable) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
@@ -247,7 +249,7 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req,
        int err;
 
        subflow_init_req(req, sk_listener);
-       mptcp_get_options(skb, &mp_opt);
+       mptcp_get_options(sk_listener, skb, &mp_opt);
 
        if (mp_opt.mp_capable && mp_opt.mp_join)
                return -EINVAL;
@@ -394,7 +396,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
        subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
        pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
 
-       mptcp_get_options(skb, &mp_opt);
+       mptcp_get_options(sk, skb, &mp_opt);
        if (subflow->request_mptcp) {
                if (!mp_opt.mp_capable) {
                        MPTCP_INC_STATS(sock_net(sk),
@@ -404,6 +406,10 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
                        goto fallback;
                }
 
+               if (mp_opt.csum_reqd)
+                       WRITE_ONCE(mptcp_sk(parent)->csum_enabled, true);
+               if (mp_opt.deny_join_id0)
+                       WRITE_ONCE(mptcp_sk(parent)->pm.remote_deny_join_id0, true);
                subflow->mp_capable = 1;
                subflow->can_ack = 1;
                subflow->remote_key = mp_opt.sndr_key;
@@ -638,7 +644,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
                 * reordered MPC will cause fallback, but we don't have other
                 * options.
                 */
-               mptcp_get_options(skb, &mp_opt);
+               mptcp_get_options(sk, skb, &mp_opt);
                if (!mp_opt.mp_capable) {
                        fallback = true;
                        goto create_child;
@@ -648,7 +654,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
                if (!new_msk)
                        fallback = true;
        } else if (subflow_req->mp_join) {
-               mptcp_get_options(skb, &mp_opt);
+               mptcp_get_options(sk, skb, &mp_opt);
                if (!mp_opt.mp_join || !subflow_hmac_valid(req, &mp_opt) ||
                    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
@@ -784,10 +790,10 @@ static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
        return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
 }
 
-static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
+static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
 {
-       WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
-                 ssn, subflow->map_subflow_seq, subflow->map_data_len);
+       pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
+                ssn, subflow->map_subflow_seq, subflow->map_data_len);
 }
 
 static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
@@ -812,22 +818,104 @@ static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
                /* Mapping covers data later in the subflow stream,
                 * currently unsupported.
                 */
-               warn_bad_map(subflow, ssn);
+               dbg_bad_map(subflow, ssn);
                return false;
        }
        if (unlikely(!before(ssn, subflow->map_subflow_seq +
                                  subflow->map_data_len))) {
                /* Mapping covers past subflow data, invalid */
-               warn_bad_map(subflow, ssn + skb->len);
+               dbg_bad_map(subflow, ssn);
                return false;
        }
        return true;
 }
 
+static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
+                                             bool csum_reqd)
+{
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+       struct csum_pseudo_header header;
+       u32 offset, seq, delta;
+       __wsum csum;
+       int len;
+
+       if (!csum_reqd)
+               return MAPPING_OK;
+
+       /* mapping already validated on previous traversal */
+       if (subflow->map_csum_len == subflow->map_data_len)
+               return MAPPING_OK;
+
+       /* traverse the receive queue, ensuring it contains a full
+        * DSS mapping and accumulating the related csum.
+        * Preserve the accumulated csum across multiple calls, to compute
+        * the csum only once
+        */
+       delta = subflow->map_data_len - subflow->map_csum_len;
+       for (;;) {
+               seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
+               offset = seq - TCP_SKB_CB(skb)->seq;
+
+               /* if the current skb has not been accounted yet, csum its contents
+                * up to the amount covered by the current DSS
+                */
+               if (offset < skb->len) {
+                       __wsum csum;
+
+                       len = min(skb->len - offset, delta);
+                       csum = skb_checksum(skb, offset, len, 0);
+                       subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum,
+                                                               subflow->map_csum_len);
+
+                       delta -= len;
+                       subflow->map_csum_len += len;
+               }
+               if (delta == 0)
+                       break;
+
+               if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
+                       /* if this subflow is closed, the partial mapping
+                        * will never be completed; flush the pending skbs, so
+                        * that subflow_sched_work_if_closed() can kick in
+                        */
+                       if (unlikely(ssk->sk_state == TCP_CLOSE))
+                               while ((skb = skb_peek(&ssk->sk_receive_queue)))
+                                       sk_eat_skb(ssk, skb);
+
+                       /* not enough data to validate the csum */
+                       return MAPPING_EMPTY;
+               }
+
+               /* the DSS mapping for the next skbs will be validated later,
+                * when a subsequent get_mapping_status() call processes them
+                */
+               skb = skb->next;
+       }
+
+       /* note that 'map_data_len' accounts only for the carried data and
+        * does not include the eventual seq increment due to the DATA_FIN,
+        * while the pseudo header requires the original DSS data length,
+        * including it
+        */
+       header.data_seq = cpu_to_be64(subflow->map_seq);
+       header.subflow_seq = htonl(subflow->map_subflow_seq);
+       header.data_len = htons(subflow->map_data_len + subflow->map_data_fin);
+       header.csum = 0;
+
+       csum = csum_partial(&header, sizeof(header), subflow->map_data_csum);
+       if (unlikely(csum_fold(csum))) {
+               MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
+               return subflow->mp_join ? MAPPING_INVALID : MAPPING_DUMMY;
+       }
+
+       return MAPPING_OK;
+}
+
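
[The checksum folded above is the plain Internet checksum (RFC 1071) over the DSS pseudo-header named in this hunk (data sequence number, subflow sequence number, data-level length, zeroed checksum field) followed by the mapped payload. Below is a minimal userspace sketch of the same arithmetic, assuming a single contiguous payload buffer; the helper names are invented, and the kernel's incremental accumulation across skbs via csum_block_add() is deliberately elided.]

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <endian.h>
    #include <arpa/inet.h>

    /* RFC 1071 ones'-complement accumulation over a byte buffer. */
    static uint32_t csum_add(uint32_t sum, const void *buf, size_t len)
    {
            const uint8_t *p = buf;

            while (len > 1) {
                    sum += (uint32_t)((p[0] << 8) | p[1]);
                    p += 2;
                    len -= 2;
            }
            if (len)
                    sum += (uint32_t)(p[0] << 8);
            return sum;
    }

    static uint16_t csum_fold(uint32_t sum)
    {
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    /* Pseudo-header + payload, mirroring the csum_pseudo_header layout above. */
    static uint16_t dss_csum(uint64_t data_seq, uint32_t subflow_seq,
                             uint16_t data_len, const void *payload)
    {
            struct __attribute__((packed)) {
                    uint64_t data_seq;
                    uint32_t subflow_seq;
                    uint16_t data_len;
                    uint16_t csum;          /* left zero while computing */
            } hdr = {
                    .data_seq = htobe64(data_seq),
                    .subflow_seq = htonl(subflow_seq),
                    .data_len = htons(data_len),
            };
            uint32_t sum = csum_add(0, &hdr, sizeof(hdr));

            return csum_fold(csum_add(sum, payload, data_len));
    }

    int main(void)
    {
            static const char payload[] = "hello";

            printf("dss csum = 0x%04x\n",
                   dss_csum(1, 1, sizeof(payload) - 1, payload));
            return 0;
    }

[A receiver verifies by summing the same pseudo-header plus payload, this time including the received checksum field, and checking that the fold yields zero.]
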
 static enum mapping_status get_mapping_status(struct sock *ssk,
                                              struct mptcp_sock *msk)
 {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+       bool csum_reqd = READ_ONCE(msk->csum_enabled);
        struct mptcp_ext *mpext;
        struct sk_buff *skb;
        u16 data_len;
@@ -920,9 +1008,10 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
                /* Allow replacing only with an identical map */
                if (subflow->map_seq == map_seq &&
                    subflow->map_subflow_seq == mpext->subflow_seq &&
-                   subflow->map_data_len == data_len) {
+                   subflow->map_data_len == data_len &&
+                   subflow->map_csum_reqd == mpext->csum_reqd) {
                        skb_ext_del(skb, SKB_EXT_MPTCP);
-                       return MAPPING_OK;
+                       goto validate_csum;
                }
 
                /* If this skb data are fully covered by the current mapping,
@@ -934,27 +1023,41 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
                }
 
                /* will validate the next map after consuming the current one */
-               return MAPPING_OK;
+               goto validate_csum;
        }
 
        subflow->map_seq = map_seq;
        subflow->map_subflow_seq = mpext->subflow_seq;
        subflow->map_data_len = data_len;
        subflow->map_valid = 1;
+       subflow->map_data_fin = mpext->data_fin;
        subflow->mpc_map = mpext->mpc_map;
-       pr_debug("new map seq=%llu subflow_seq=%u data_len=%u",
+       subflow->map_csum_reqd = mpext->csum_reqd;
+       subflow->map_csum_len = 0;
+       subflow->map_data_csum = csum_unfold(mpext->csum);
+
+       /* cf. RFC 8684 Section 3.3.0 */
+       if (unlikely(subflow->map_csum_reqd != csum_reqd))
+               return MAPPING_INVALID;
+
+       pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
                 subflow->map_seq, subflow->map_subflow_seq,
-                subflow->map_data_len);
+                subflow->map_data_len, subflow->map_csum_reqd,
+                subflow->map_data_csum);
 
 validate_seq:
        /* we revalidate valid mapping on new skb, because we must ensure
         * the current skb is completely covered by the available mapping
         */
-       if (!validate_mapping(ssk, skb))
+       if (!validate_mapping(ssk, skb)) {
+               MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH);
                return MAPPING_INVALID;
+       }
 
        skb_ext_del(skb, SKB_EXT_MPTCP);
-       return MAPPING_OK;
+
+validate_csum:
+       return validate_data_csum(ssk, skb, csum_reqd);
 }
 
 static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
@@ -1000,7 +1103,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
        struct sk_buff *skb;
 
        if (!skb_peek(&ssk->sk_receive_queue))
-               subflow->data_avail = 0;
+               WRITE_ONCE(subflow->data_avail, 0);
        if (subflow->data_avail)
                return true;
 
@@ -1039,18 +1142,13 @@ static bool subflow_check_data_avail(struct sock *ssk)
                ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
                pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
                         ack_seq);
-               if (ack_seq == old_ack) {
-                       subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
-                       break;
-               } else if (after64(ack_seq, old_ack)) {
-                       subflow->data_avail = MPTCP_SUBFLOW_OOO_DATA;
-                       break;
+               if (unlikely(before64(ack_seq, old_ack))) {
+                       mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
+                       continue;
                }
 
-               /* only accept in-sequence mapping. Old values are spurious
-                * retransmission
-                */
-               mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
+               WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
+               break;
        }
        return true;
 
@@ -1065,12 +1163,11 @@ fallback:
                 * subflow_error_report() will introduce the appropriate barriers
                 */
                ssk->sk_err = EBADMSG;
-               ssk->sk_error_report(ssk);
                tcp_set_state(ssk, TCP_CLOSE);
                subflow->reset_transient = 0;
                subflow->reset_reason = MPTCP_RST_EMPTCP;
                tcp_send_active_reset(ssk, GFP_ATOMIC);
-               subflow->data_avail = 0;
+               WRITE_ONCE(subflow->data_avail, 0);
                return false;
        }
 
@@ -1080,7 +1177,7 @@ fallback:
        subflow->map_seq = READ_ONCE(msk->ack_seq);
        subflow->map_data_len = skb->len;
        subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
-       subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
+       WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
        return true;
 }
 
@@ -1092,7 +1189,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
        if (subflow->map_valid &&
            mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
                subflow->map_valid = 0;
-               subflow->data_avail = 0;
+               WRITE_ONCE(subflow->data_avail, 0);
 
                pr_debug("Done with mapping: seq=%u data_len=%u",
                         subflow->map_subflow_seq,
@@ -1120,41 +1217,6 @@ void mptcp_space(const struct sock *ssk, int *space, int *full_space)
        *full_space = tcp_full_space(sk);
 }
 
-static void subflow_data_ready(struct sock *sk)
-{
-       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
-       u16 state = 1 << inet_sk_state_load(sk);
-       struct sock *parent = subflow->conn;
-       struct mptcp_sock *msk;
-
-       msk = mptcp_sk(parent);
-       if (state & TCPF_LISTEN) {
-               /* MPJ subflows are removed from the accept queue before reaching here;
-                * avoid stray wakeups
-                */
-               if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
-                       return;
-
-               set_bit(MPTCP_DATA_READY, &msk->flags);
-               parent->sk_data_ready(parent);
-               return;
-       }
-
-       WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
-                    !subflow->mp_join && !(state & TCPF_CLOSE));
-
-       if (mptcp_subflow_data_available(sk))
-               mptcp_data_ready(parent, sk);
-}
-
-static void subflow_write_space(struct sock *ssk)
-{
-       struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
-
-       mptcp_propagate_sndbuf(sk, ssk);
-       mptcp_write_space(sk);
-}
-
 void __mptcp_error_report(struct sock *sk)
 {
        struct mptcp_subflow_context *subflow;
@@ -1195,6 +1257,43 @@ static void subflow_error_report(struct sock *ssk)
        mptcp_data_unlock(sk);
 }
 
+static void subflow_data_ready(struct sock *sk)
+{
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+       u16 state = 1 << inet_sk_state_load(sk);
+       struct sock *parent = subflow->conn;
+       struct mptcp_sock *msk;
+
+       msk = mptcp_sk(parent);
+       if (state & TCPF_LISTEN) {
+               /* MPJ subflows are removed from the accept queue before reaching here;
+                * avoid stray wakeups
+                */
+               if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
+                       return;
+
+               set_bit(MPTCP_DATA_READY, &msk->flags);
+               parent->sk_data_ready(parent);
+               return;
+       }
+
+       WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
+                    !subflow->mp_join && !(state & TCPF_CLOSE));
+
+       if (mptcp_subflow_data_available(sk))
+               mptcp_data_ready(parent, sk);
+       else if (unlikely(sk->sk_err))
+               subflow_error_report(sk);
+}
+
+static void subflow_write_space(struct sock *ssk)
+{
+       struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
+
+       mptcp_propagate_sndbuf(sk, ssk);
+       mptcp_write_space(sk);
+}
+
 static struct inet_connection_sock_af_ops *
 subflow_default_af_ops(struct sock *sk)
 {
@@ -1505,6 +1604,8 @@ static void subflow_state_change(struct sock *sk)
         */
        if (mptcp_subflow_data_available(sk))
                mptcp_data_ready(parent, sk);
+       else if (unlikely(sk->sk_err))
+               subflow_error_report(sk);
 
        subflow_sched_work_if_closed(mptcp_sk(parent), sk);
 
index 87112da..049890e 100644 (file)
@@ -74,7 +74,7 @@ obj-$(CONFIG_NF_DUP_NETDEV)   += nf_dup_netdev.o
 nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \
                  nf_tables_trace.o nft_immediate.o nft_cmp.o nft_range.o \
                  nft_bitwise.o nft_byteorder.o nft_payload.o nft_lookup.o \
-                 nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o \
+                 nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o nft_last.o \
                  nft_chain_route.o nf_tables_offload.o \
                  nft_set_hash.o nft_set_bitmap.o nft_set_rbtree.o \
                  nft_set_pipapo.o
index be14e0b..5564740 100644 (file)
 static DEFINE_MUTEX(nf_ct_proto_mutex);
 
 #ifdef CONFIG_SYSCTL
-__printf(5, 6)
+__printf(4, 5)
 void nf_l4proto_log_invalid(const struct sk_buff *skb,
-                           struct net *net,
-                           u16 pf, u8 protonum,
+                           const struct nf_hook_state *state,
+                           u8 protonum,
                            const char *fmt, ...)
 {
+       struct net *net = state->net;
        struct va_format vaf;
        va_list args;
 
@@ -62,15 +63,16 @@ void nf_l4proto_log_invalid(const struct sk_buff *skb,
        vaf.fmt = fmt;
        vaf.va = &args;
 
-       nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
-                     "nf_ct_proto_%d: %pV ", protonum, &vaf);
+       nf_log_packet(net, state->pf, 0, skb, state->in, state->out,
+                     NULL, "nf_ct_proto_%d: %pV ", protonum, &vaf);
        va_end(args);
 }
 EXPORT_SYMBOL_GPL(nf_l4proto_log_invalid);
 
-__printf(3, 4)
+__printf(4, 5)
 void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
                               const struct nf_conn *ct,
+                              const struct nf_hook_state *state,
                               const char *fmt, ...)
 {
        struct va_format vaf;
@@ -85,7 +87,7 @@ void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
        vaf.fmt = fmt;
        vaf.va = &args;
 
-       nf_l4proto_log_invalid(skb, net, nf_ct_l3num(ct),
+       nf_l4proto_log_invalid(skb, state,
                               nf_ct_protonum(ct), "%pV", &vaf);
        va_end(args);
 }
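
[The __printf(4, 5) annotation is the compiler's format(printf) attribute shifted to the new argument positions, so format/argument mismatches are still caught after the signature change; bundling the nf_hook_state also lets the log line carry the in/out devices that the old (net, pf) pair could not. A userspace sketch of the same forwarding pattern follows, with hypothetical names standing in for the netfilter types.]

    #include <stdarg.h>
    #include <stdio.h>

    struct hook_state {             /* stand-in for struct nf_hook_state */
            int pf;
            const char *in, *out;
    };

    __attribute__((format(printf, 3, 4)))
    static void log_invalid(const struct hook_state *state, int protonum,
                            const char *fmt, ...)
    {
            va_list args;

            va_start(args, fmt);
            fprintf(stderr, "proto_%d pf=%d in=%s out=%s: ", protonum,
                    state->pf, state->in ? state->in : "?",
                    state->out ? state->out : "?");
            vfprintf(stderr, fmt, args);
            fputc('\n', stderr);
            va_end(args);
    }

    int main(void)
    {
            struct hook_state st = { .pf = 2, .in = "eth0", .out = NULL };

            log_invalid(&st, 6, "invalid state %s", "SYN_SENT");
            return 0;
    }
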
index 4f33307..c1557d4 100644 (file)
@@ -382,7 +382,8 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
 
 static noinline bool
 dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
-        const struct dccp_hdr *dh)
+        const struct dccp_hdr *dh,
+        const struct nf_hook_state *hook_state)
 {
        struct net *net = nf_ct_net(ct);
        struct nf_dccp_net *dn;
@@ -414,7 +415,7 @@ dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
        return true;
 
 out_invalid:
-       nf_ct_l4proto_log_invalid(skb, ct, "%s", msg);
+       nf_ct_l4proto_log_invalid(skb, ct, hook_state, "%s", msg);
        return false;
 }
 
@@ -464,8 +465,7 @@ static bool dccp_error(const struct dccp_hdr *dh,
        }
        return false;
 out_invalid:
-       nf_l4proto_log_invalid(skb, state->net, state->pf,
-                              IPPROTO_DCCP, "%s", msg);
+       nf_l4proto_log_invalid(skb, state, IPPROTO_DCCP, "%s", msg);
        return true;
 }
 
@@ -488,7 +488,7 @@ int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
                return -NF_ACCEPT;
 
        type = dh->dccph_type;
-       if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh))
+       if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh, state))
                return -NF_ACCEPT;
 
        if (type == DCCP_PKT_RESET &&
@@ -543,11 +543,11 @@ int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
                ct->proto.dccp.last_pkt = type;
 
                spin_unlock_bh(&ct->lock);
-               nf_ct_l4proto_log_invalid(skb, ct, "%s", "invalid packet");
+               nf_ct_l4proto_log_invalid(skb, ct, state, "%s", "invalid packet");
                return NF_ACCEPT;
        case CT_DCCP_INVALID:
                spin_unlock_bh(&ct->lock);
-               nf_ct_l4proto_log_invalid(skb, ct, "%s", "invalid state transition");
+               nf_ct_l4proto_log_invalid(skb, ct, state, "%s", "invalid state transition");
                return -NF_ACCEPT;
        }
 
index 4efd874..b38b716 100644 (file)
@@ -170,12 +170,12 @@ int nf_conntrack_inet_error(struct nf_conn *tmpl, struct sk_buff *skb,
        ct_daddr = &ct->tuplehash[dir].tuple.dst.u3;
        if (!nf_inet_addr_cmp(outer_daddr, ct_daddr)) {
                if (state->pf == AF_INET) {
-                       nf_l4proto_log_invalid(skb, state->net, state->pf,
+                       nf_l4proto_log_invalid(skb, state,
                                               l4proto,
                                               "outer daddr %pI4 != inner %pI4",
                                               &outer_daddr->ip, &ct_daddr->ip);
                } else if (state->pf == AF_INET6) {
-                       nf_l4proto_log_invalid(skb, state->net, state->pf,
+                       nf_l4proto_log_invalid(skb, state,
                                               l4proto,
                                               "outer daddr %pI6 != inner %pI6",
                                               &outer_daddr->ip6, &ct_daddr->ip6);
@@ -197,8 +197,7 @@ static void icmp_error_log(const struct sk_buff *skb,
                           const struct nf_hook_state *state,
                           const char *msg)
 {
-       nf_l4proto_log_invalid(skb, state->net, state->pf,
-                              IPPROTO_ICMP, "%s", msg);
+       nf_l4proto_log_invalid(skb, state, IPPROTO_ICMP, "%s", msg);
 }
 
 /* Small and modified version of icmp_rcv */
index facd8c6..61e3b05 100644 (file)
@@ -126,8 +126,7 @@ static void icmpv6_error_log(const struct sk_buff *skb,
                             const struct nf_hook_state *state,
                             const char *msg)
 {
-       nf_l4proto_log_invalid(skb, state->net, state->pf,
-                              IPPROTO_ICMPV6, "%s", msg);
+       nf_l4proto_log_invalid(skb, state, IPPROTO_ICMPV6, "%s", msg);
 }
 
 int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
index fb8dc02..2394238 100644 (file)
@@ -351,7 +351,7 @@ static bool sctp_error(struct sk_buff *skb,
        }
        return false;
 out_invalid:
-       nf_l4proto_log_invalid(skb, state->net, state->pf, IPPROTO_SCTP, "%s", logmsg);
+       nf_l4proto_log_invalid(skb, state, IPPROTO_SCTP, "%s", logmsg);
        return true;
 }
 
index de840fc..f7e8baf 100644 (file)
@@ -446,14 +446,15 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
        }
 }
 
-static bool tcp_in_window(const struct nf_conn *ct,
-                         struct ip_ct_tcp *state,
+static bool tcp_in_window(struct nf_conn *ct,
                          enum ip_conntrack_dir dir,
                          unsigned int index,
                          const struct sk_buff *skb,
                          unsigned int dataoff,
-                         const struct tcphdr *tcph)
+                         const struct tcphdr *tcph,
+                         const struct nf_hook_state *hook_state)
 {
+       struct ip_ct_tcp *state = &ct->proto.tcp;
        struct net *net = nf_ct_net(ct);
        struct nf_tcp_net *tn = nf_tcp_pernet(net);
        struct ip_ct_tcp_state *sender = &state->seen[dir];
@@ -670,7 +671,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
                    tn->tcp_be_liberal)
                        res = true;
                if (!res) {
-                       nf_ct_l4proto_log_invalid(skb, ct,
+                       nf_ct_l4proto_log_invalid(skb, ct, hook_state,
                        "%s",
                        before(seq, sender->td_maxend + 1) ?
                        in_recv_win ?
@@ -710,7 +711,7 @@ static void tcp_error_log(const struct sk_buff *skb,
                          const struct nf_hook_state *state,
                          const char *msg)
 {
-       nf_l4proto_log_invalid(skb, state->net, state->pf, IPPROTO_TCP, "%s", msg);
+       nf_l4proto_log_invalid(skb, state, IPPROTO_TCP, "%s", msg);
 }
 
 /* Protect conntrack against broken packets. Code taken from ipt_unclean.c. */
@@ -970,7 +971,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
                                        IP_CT_EXP_CHALLENGE_ACK;
                }
                spin_unlock_bh(&ct->lock);
-               nf_ct_l4proto_log_invalid(skb, ct,
+               nf_ct_l4proto_log_invalid(skb, ct, state,
                                          "packet (index %d) in dir %d ignored, state %s",
                                          index, dir,
                                          tcp_conntrack_names[old_state]);
@@ -995,7 +996,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
                pr_debug("nf_ct_tcp: Invalid dir=%i index=%u ostate=%u\n",
                         dir, get_conntrack_index(th), old_state);
                spin_unlock_bh(&ct->lock);
-               nf_ct_l4proto_log_invalid(skb, ct, "invalid state");
+               nf_ct_l4proto_log_invalid(skb, ct, state, "invalid state");
                return -NF_ACCEPT;
        case TCP_CONNTRACK_TIME_WAIT:
                /* RFC5961 compliance cause stack to send "challenge-ACK"
@@ -1010,7 +1011,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
                        /* Detected RFC5961 challenge ACK */
                        ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
                        spin_unlock_bh(&ct->lock);
-                       nf_ct_l4proto_log_invalid(skb, ct, "challenge-ack ignored");
+                       nf_ct_l4proto_log_invalid(skb, ct, state, "challenge-ack ignored");
                        return NF_ACCEPT; /* Don't change state */
                }
                break;
@@ -1035,7 +1036,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
                        if (before(seq, ct->proto.tcp.seen[!dir].td_maxack)) {
                                /* Invalid RST  */
                                spin_unlock_bh(&ct->lock);
-                               nf_ct_l4proto_log_invalid(skb, ct, "invalid rst");
+                               nf_ct_l4proto_log_invalid(skb, ct, state, "invalid rst");
                                return -NF_ACCEPT;
                        }
 
@@ -1079,8 +1080,8 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
                break;
        }
 
-       if (!tcp_in_window(ct, &ct->proto.tcp, dir, index,
-                          skb, dataoff, th)) {
+       if (!tcp_in_window(ct, dir, index,
+                          skb, dataoff, th, state)) {
                spin_unlock_bh(&ct->lock);
                return -NF_ACCEPT;
        }
index 68911fc..698fee4 100644 (file)
@@ -38,8 +38,7 @@ static void udp_error_log(const struct sk_buff *skb,
                          const struct nf_hook_state *state,
                          const char *msg)
 {
-       nf_l4proto_log_invalid(skb, state->net, state->pf,
-                              IPPROTO_UDP, "%s", msg);
+       nf_l4proto_log_invalid(skb, state, IPPROTO_UDP, "%s", msg);
 }
 
 static bool udp_error(struct sk_buff *skb,
@@ -130,8 +129,7 @@ static void udplite_error_log(const struct sk_buff *skb,
                              const struct nf_hook_state *state,
                              const char *msg)
 {
-       nf_l4proto_log_invalid(skb, state->net, state->pf,
-                              IPPROTO_UDPLITE, "%s", msg);
+       nf_l4proto_log_invalid(skb, state, IPPROTO_UDPLITE, "%s", msg);
 }
 
 static bool udplite_error(struct sk_buff *skb,
index b100c04..3d6d494 100644 (file)
@@ -31,6 +31,9 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
        int length = (th->doff * 4) - sizeof(*th);
        u8 buf[40], *ptr;
 
+       if (unlikely(length < 0))
+               return false;
+
        ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
        if (ptr == NULL)
                return false;
@@ -47,6 +50,8 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
                        length--;
                        continue;
                default:
+                       if (length < 2)
+                               return true;
                        opsize = *ptr++;
                        if (opsize < 2)
                                return true;
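
[The two added checks guard the option walk: a TCP header with doff < 5 yields a negative option length, and after consuming single-byte options fewer than two bytes may remain where a kind/length pair is expected, so reading it would run past the buffer. A self-contained sketch of the bounds-checked walk over a raw options buffer follows; it returns false on malformed input, where the kernel variants choose to stop parsing instead.]

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TCPOPT_EOL      0
    #define TCPOPT_NOP      1

    /* Walk TCP options, refusing to read past the declared length. */
    static bool walk_tcp_options(const uint8_t *ptr, int length)
    {
            while (length > 0) {
                    uint8_t opcode = *ptr++;
                    uint8_t opsize;

                    if (opcode == TCPOPT_EOL)
                            return true;
                    if (opcode == TCPOPT_NOP) {
                            length--;
                            continue;
                    }
                    if (length < 2)         /* no room for the length byte */
                            return false;
                    opsize = ptr[0];
                    if (opsize < 2 || opsize > length)
                            return false;
                    printf("option %u, %u bytes\n", opcode, opsize);
                    ptr += opsize - 1;
                    length -= opsize;
            }
            return true;
    }

    int main(void)
    {
            const uint8_t opts[] = { 1, 1, 2, 4, 0x05, 0xb4 };  /* NOP NOP MSS */

            return walk_tcp_options(opts, sizeof(opts)) ? 0 : 1;
    }
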
index f20f6ae..d621424 100644 (file)
@@ -4338,13 +4338,45 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
        err = nf_tables_set_alloc_name(&ctx, set, name);
        kfree(name);
        if (err < 0)
-               goto err_set_alloc_name;
+               goto err_set_name;
+
+       udata = NULL;
+       if (udlen) {
+               udata = set->data + size;
+               nla_memcpy(udata, nla[NFTA_SET_USERDATA], udlen);
+       }
+
+       INIT_LIST_HEAD(&set->bindings);
+       INIT_LIST_HEAD(&set->catchall_list);
+       set->table = table;
+       write_pnet(&set->net, net);
+       set->ops = ops;
+       set->ktype = ktype;
+       set->klen = desc.klen;
+       set->dtype = dtype;
+       set->objtype = objtype;
+       set->dlen = desc.dlen;
+       set->flags = flags;
+       set->size = desc.size;
+       set->policy = policy;
+       set->udlen = udlen;
+       set->udata = udata;
+       set->timeout = timeout;
+       set->gc_int = gc_int;
+
+       set->field_count = desc.field_count;
+       for (i = 0; i < desc.field_count; i++)
+               set->field_len[i] = desc.field_len[i];
+
+       err = ops->init(set, &desc, nla);
+       if (err < 0)
+               goto err_set_init;
 
        if (nla[NFTA_SET_EXPR]) {
                expr = nft_set_elem_expr_alloc(&ctx, set, nla[NFTA_SET_EXPR]);
                if (IS_ERR(expr)) {
                        err = PTR_ERR(expr);
-                       goto err_set_alloc_name;
+                       goto err_set_expr_alloc;
                }
                set->exprs[0] = expr;
                set->num_exprs++;
@@ -4355,75 +4387,44 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
 
                if (!(flags & NFT_SET_EXPR)) {
                        err = -EINVAL;
-                       goto err_set_alloc_name;
+                       goto err_set_expr_alloc;
                }
                i = 0;
                nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) {
                        if (i == NFT_SET_EXPR_MAX) {
                                err = -E2BIG;
-                               goto err_set_init;
+                               goto err_set_expr_alloc;
                        }
                        if (nla_type(tmp) != NFTA_LIST_ELEM) {
                                err = -EINVAL;
-                               goto err_set_init;
+                               goto err_set_expr_alloc;
                        }
                        expr = nft_set_elem_expr_alloc(&ctx, set, tmp);
                        if (IS_ERR(expr)) {
                                err = PTR_ERR(expr);
-                               goto err_set_init;
+                               goto err_set_expr_alloc;
                        }
                        set->exprs[i++] = expr;
                        set->num_exprs++;
                }
        }
 
-       udata = NULL;
-       if (udlen) {
-               udata = set->data + size;
-               nla_memcpy(udata, nla[NFTA_SET_USERDATA], udlen);
-       }
-
-       INIT_LIST_HEAD(&set->bindings);
-       INIT_LIST_HEAD(&set->catchall_list);
-       set->table = table;
-       write_pnet(&set->net, net);
-       set->ops   = ops;
-       set->ktype = ktype;
-       set->klen  = desc.klen;
-       set->dtype = dtype;
-       set->objtype = objtype;
-       set->dlen  = desc.dlen;
-       set->flags = flags;
-       set->size  = desc.size;
-       set->policy = policy;
-       set->udlen  = udlen;
-       set->udata  = udata;
-       set->timeout = timeout;
-       set->gc_int = gc_int;
        set->handle = nf_tables_alloc_handle(table);
 
-       set->field_count = desc.field_count;
-       for (i = 0; i < desc.field_count; i++)
-               set->field_len[i] = desc.field_len[i];
-
-       err = ops->init(set, &desc, nla);
-       if (err < 0)
-               goto err_set_init;
-
        err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
        if (err < 0)
-               goto err_set_trans;
+               goto err_set_expr_alloc;
 
        list_add_tail_rcu(&set->list, &table->sets);
        table->use++;
        return 0;
 
-err_set_trans:
-       ops->destroy(set);
-err_set_init:
+err_set_expr_alloc:
        for (i = 0; i < set->num_exprs; i++)
                nft_expr_destroy(&ctx, set->exprs[i]);
-err_set_alloc_name:
+
+       ops->destroy(set);
+err_set_init:
        kfree(set->name);
 err_set_name:
        kvfree(set);
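
[The point of this reshuffle is that the error ladder must mirror initialization order: ops->init() now runs before any expression is allocated, so a failure can unwind the expressions, then the set backend, then the name, in exact reverse. The same goto-ladder idiom in miniature, with hypothetical stage names; note how only the expressions actually allocated get freed.]

    #include <stdio.h>

    #define MAX_EXPRS 4

    static int num_exprs;

    static int stage_init(void) { puts("init"); return 0; }
    static void stage_destroy(void) { puts("destroy"); }

    static int expr_alloc(int i) { return i < 2 ? 0 : -12; }  /* 3rd fails */
    static void expr_free(int i) { printf("free expr %d\n", i); }

    static int create(void)
    {
            int err, i;

            err = stage_init();
            if (err)
                    goto err_init;

            for (i = 0; i < MAX_EXPRS; i++) {
                    err = expr_alloc(i);
                    if (err)
                            goto err_exprs;
                    num_exprs++;
            }
            return 0;

    err_exprs:
            /* unwind in reverse: only what was actually allocated */
            for (i = 0; i < num_exprs; i++)
                    expr_free(i);
            stage_destroy();
    err_init:
            return err;
    }

    int main(void)
    {
            return create() ? 1 : 0;
    }
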
index 7780342..866cfba 100644 (file)
@@ -268,6 +268,7 @@ static struct nft_expr_type *nft_basic_types[] = {
        &nft_meta_type,
        &nft_rt_type,
        &nft_exthdr_type,
+       &nft_last_type,
 };
 
 static struct nft_object_type *nft_basic_objects[] = {
index 58fda6a..50b4e3c 100644 (file)
@@ -126,8 +126,10 @@ static int nfnl_hook_dump_one(struct sk_buff *nlskb,
 
 #ifdef CONFIG_KALLSYMS
        ret = snprintf(sym, sizeof(sym), "%ps", ops->hook);
-       if (ret < 0 || ret > (int)sizeof(sym))
+       if (ret >= sizeof(sym)) {
+               ret = -EINVAL;
                goto nla_put_failure;
+       }
 
        module_name = strstr(sym, " [");
        if (module_name) {
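
[The kernel's vsnprintf() never returns a negative value, so the dead `ret < 0` arm is dropped; since snprintf() returns the length the output would have had, the only truncation signal is a return value of sizeof(buf) or more, and the old `ret > (int)sizeof(sym)` test missed the exact-fit case (ret == sizeof(sym)). A short standalone illustration of the corrected check:]

    #include <stdio.h>

    int main(void)
    {
            char sym[16];
            int ret;

            ret = snprintf(sym, sizeof(sym), "%s+0x%x",
                           "very_long_symbol_name", 0x10);
            if (ret >= (int)sizeof(sym)) {
                    fprintf(stderr, "truncated (needed %d bytes)\n", ret + 1);
                    return 1;
            }
            printf("%s\n", sym);
            return 0;
    }
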
index 7f705b5..4f583d2 100644 (file)
@@ -164,7 +164,7 @@ nft_tcp_header_pointer(const struct nft_pktinfo *pkt,
 {
        struct tcphdr *tcph;
 
-       if (!pkt->tprot_set || pkt->tprot != IPPROTO_TCP)
+       if (pkt->tprot != IPPROTO_TCP)
                return NULL;
 
        tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt), sizeof(*tcph), buffer);
@@ -312,6 +312,9 @@ static void nft_exthdr_sctp_eval(const struct nft_expr *expr,
        const struct sctp_chunkhdr *sch;
        struct sctp_chunkhdr _sch;
 
+       if (pkt->tprot != IPPROTO_SCTP)
+               goto err;
+
        do {
                sch = skb_header_pointer(pkt->skb, offset, sizeof(_sch), &_sch);
                if (!sch || !sch->length)
@@ -334,7 +337,7 @@ static void nft_exthdr_sctp_eval(const struct nft_expr *expr,
                }
                offset += SCTP_PAD4(ntohs(sch->length));
        } while (offset < pkt->skb->len);
-
+err:
        if (priv->flags & NFT_EXTHDR_F_PRESENT)
                nft_reg_store8(dest, false);
        else
diff --git a/net/netfilter/nft_last.c b/net/netfilter/nft_last.c
new file mode 100644 (file)
index 0000000..913ac45
--- /dev/null
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_last_priv {
+       unsigned long   last_jiffies;
+       unsigned int    last_set;
+};
+
+static const struct nla_policy nft_last_policy[NFTA_LAST_MAX + 1] = {
+       [NFTA_LAST_SET] = { .type = NLA_U32 },
+       [NFTA_LAST_MSECS] = { .type = NLA_U64 },
+};
+
+static int nft_last_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+                        const struct nlattr * const tb[])
+{
+       struct nft_last_priv *priv = nft_expr_priv(expr);
+       u64 last_jiffies;
+       int err;
+
+       if (tb[NFTA_LAST_MSECS]) {
+               err = nf_msecs_to_jiffies64(tb[NFTA_LAST_MSECS], &last_jiffies);
+               if (err < 0)
+                       return err;
+
+               priv->last_jiffies = jiffies + (unsigned long)last_jiffies;
+               priv->last_set = 1;
+       }
+
+       return 0;
+}
+
+static void nft_last_eval(const struct nft_expr *expr,
+                         struct nft_regs *regs, const struct nft_pktinfo *pkt)
+{
+       struct nft_last_priv *priv = nft_expr_priv(expr);
+
+       priv->last_jiffies = jiffies;
+       priv->last_set = 1;
+}
+
+static int nft_last_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       struct nft_last_priv *priv = nft_expr_priv(expr);
+       __be64 msecs;
+
+       if (time_before(jiffies, priv->last_jiffies))
+               priv->last_set = 0;
+
+       if (priv->last_set)
+               msecs = nf_jiffies64_to_msecs(jiffies - priv->last_jiffies);
+       else
+               msecs = 0;
+
+       if (nla_put_be32(skb, NFTA_LAST_SET, htonl(priv->last_set)) ||
+           nla_put_be64(skb, NFTA_LAST_MSECS, msecs, NFTA_LAST_PAD))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static const struct nft_expr_ops nft_last_ops = {
+       .type           = &nft_last_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_last_priv)),
+       .eval           = nft_last_eval,
+       .init           = nft_last_init,
+       .dump           = nft_last_dump,
+};
+
+struct nft_expr_type nft_last_type __read_mostly = {
+       .name           = "last",
+       .ops            = &nft_last_ops,
+       .policy         = nft_last_policy,
+       .maxattr        = NFTA_LAST_MAX,
+       .flags          = NFT_EXPR_STATEFUL,
+       .owner          = THIS_MODULE,
+};
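
[nft_last keeps deliberately tiny state, one timestamp refreshed on every match plus one flag, and reports it back over netlink as "milliseconds since last seen". A userspace analog of the same bookkeeping, using CLOCK_MONOTONIC in place of jiffies; this illustrates the idea only and is not the kernel or libnftnl API.]

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    struct last_state {
            struct timespec last;
            bool set;
    };

    /* Called on every "packet match": remember when it happened. */
    static void last_eval(struct last_state *s)
    {
            clock_gettime(CLOCK_MONOTONIC, &s->last);
            s->set = true;
    }

    /* Milliseconds since the last hit, or -1 if never hit. */
    static int64_t last_msecs(const struct last_state *s)
    {
            struct timespec now;

            if (!s->set)
                    return -1;
            clock_gettime(CLOCK_MONOTONIC, &now);
            return (now.tv_sec - s->last.tv_sec) * 1000 +
                   (now.tv_nsec - s->last.tv_nsec) / 1000000;
    }

    int main(void)
    {
            struct last_state s = { .set = false };

            last_eval(&s);                  /* packet matched */
            printf("last seen %lld ms ago\n", (long long)last_msecs(&s));
            return 0;
    }
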
index 41109c3..2898263 100644 (file)
@@ -13,6 +13,7 @@ openvswitch-y := \
        flow_netlink.o \
        flow_table.o \
        meter.o \
+       openvswitch_trace.o \
        vport.o \
        vport-internal_dev.o \
        vport-netdev.o
@@ -24,3 +25,5 @@ endif
 obj-$(CONFIG_OPENVSWITCH_VXLAN)+= vport-vxlan.o
 obj-$(CONFIG_OPENVSWITCH_GENEVE)+= vport-geneve.o
 obj-$(CONFIG_OPENVSWITCH_GRE)  += vport-gre.o
+
+CFLAGS_openvswitch_trace.o = -I$(src)
index 77d924a..ef15d9e 100644 (file)
@@ -30,6 +30,7 @@
 #include "conntrack.h"
 #include "vport.h"
 #include "flow_netlink.h"
+#include "openvswitch_trace.h"
 
 struct deferred_action {
        struct sk_buff *skb;
@@ -1242,6 +1243,9 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
             a = nla_next(a, &rem)) {
                int err = 0;
 
+               if (trace_ovs_do_execute_action_enabled())
+                       trace_ovs_do_execute_action(dp, skb, key, a, rem);
+
                switch (nla_type(a)) {
                case OVS_ACTION_ATTR_OUTPUT: {
                        int port = nla_get_u32(a);
index 9d6ef6c..bc164b3 100644 (file)
@@ -43,6 +43,7 @@
 #include "flow_table.h"
 #include "flow_netlink.h"
 #include "meter.h"
+#include "openvswitch_trace.h"
 #include "vport-internal_dev.h"
 #include "vport-netdev.h"
 
@@ -275,6 +276,9 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
        struct dp_stats_percpu *stats;
        int err;
 
+       if (trace_ovs_dp_upcall_enabled())
+               trace_ovs_dp_upcall(dp, skb, key, upcall_info);
+
        if (upcall_info->portid == 0) {
                err = -ENOTCONN;
                goto err;
diff --git a/net/openvswitch/openvswitch_trace.c b/net/openvswitch/openvswitch_trace.c
new file mode 100644 (file)
index 0000000..62c5f7d
--- /dev/null
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/* bug in tracepoint.h, it should include this */
+#include <linux/module.h>
+
+/* sparse isn't too happy with all macros... */
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "openvswitch_trace.h"
+
+#endif
diff --git a/net/openvswitch/openvswitch_trace.h b/net/openvswitch/openvswitch_trace.h
new file mode 100644 (file)
index 0000000..3eb35d9
--- /dev/null
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM openvswitch
+
+#if !defined(_TRACE_OPENVSWITCH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_OPENVSWITCH_H
+
+#include <linux/tracepoint.h>
+
+#include "datapath.h"
+
+TRACE_EVENT(ovs_do_execute_action,
+
+       TP_PROTO(struct datapath *dp, struct sk_buff *skb,
+                struct sw_flow_key *key, const struct nlattr *a, int rem),
+
+       TP_ARGS(dp, skb, key, a, rem),
+
+       TP_STRUCT__entry(
+               __field(        void *,         dpaddr                  )
+               __string(       dp_name,        ovs_dp_name(dp)         )
+               __string(       dev_name,       skb->dev->name          )
+               __field(        void *,         skbaddr                 )
+               __field(        unsigned int,   len                     )
+               __field(        unsigned int,   data_len                )
+               __field(        unsigned int,   truesize                )
+               __field(        u8,             nr_frags                )
+               __field(        u16,            gso_size                )
+               __field(        u16,            gso_type                )
+               __field(        u32,            ovs_flow_hash           )
+               __field(        u32,            recirc_id               )
+               __field(        void *,         keyaddr                 )
+               __field(        u16,            key_eth_type            )
+               __field(        u8,             key_ct_state            )
+               __field(        u8,             key_ct_orig_proto       )
+               __field(        u16,            key_ct_zone             )
+               __field(        unsigned int,   flow_key_valid          )
+               __field(        u8,             action_type             )
+               __field(        unsigned int,   action_len              )
+               __field(        void *,         action_data             )
+               __field(        u8,             is_last                 )
+       ),
+
+       TP_fast_assign(
+               __entry->dpaddr = dp;
+               __assign_str(dp_name, ovs_dp_name(dp));
+               __assign_str(dev_name, skb->dev->name);
+               __entry->skbaddr = skb;
+               __entry->len = skb->len;
+               __entry->data_len = skb->data_len;
+               __entry->truesize = skb->truesize;
+               __entry->nr_frags = skb_shinfo(skb)->nr_frags;
+               __entry->gso_size = skb_shinfo(skb)->gso_size;
+               __entry->gso_type = skb_shinfo(skb)->gso_type;
+               __entry->ovs_flow_hash = key->ovs_flow_hash;
+               __entry->recirc_id = key->recirc_id;
+               __entry->keyaddr = key;
+               __entry->key_eth_type = key->eth.type;
+               __entry->key_ct_state = key->ct_state;
+               __entry->key_ct_orig_proto = key->ct_orig_proto;
+               __entry->key_ct_zone = key->ct_zone;
+               __entry->flow_key_valid = !(key->mac_proto & SW_FLOW_KEY_INVALID);
+               __entry->action_type = nla_type(a);
+               __entry->action_len = nla_len(a);
+               __entry->action_data = nla_data(a);
+               __entry->is_last = nla_is_last(a, rem);
+       ),
+
+       TP_printk("dpaddr=%p dp_name=%s dev=%s skbaddr=%p len=%u data_len=%u truesize=%u nr_frags=%d gso_size=%d gso_type=%#x ovs_flow_hash=0x%08x recirc_id=0x%08x keyaddr=%p eth_type=0x%04x ct_state=%02x ct_orig_proto=%02x ct_Zone=%04x flow_key_valid=%d action_type=%u action_len=%u action_data=%p is_last=%d",
+                 __entry->dpaddr, __get_str(dp_name), __get_str(dev_name),
+                 __entry->skbaddr, __entry->len, __entry->data_len,
+                 __entry->truesize, __entry->nr_frags, __entry->gso_size,
+                 __entry->gso_type, __entry->ovs_flow_hash,
+                 __entry->recirc_id, __entry->keyaddr, __entry->key_eth_type,
+                 __entry->key_ct_state, __entry->key_ct_orig_proto,
+                 __entry->key_ct_zone,
+                 __entry->flow_key_valid,
+                 __entry->action_type, __entry->action_len,
+                 __entry->action_data, __entry->is_last)
+);
+
+TRACE_EVENT(ovs_dp_upcall,
+
+       TP_PROTO(struct datapath *dp, struct sk_buff *skb,
+                const struct sw_flow_key *key,
+                const struct dp_upcall_info *upcall_info),
+
+       TP_ARGS(dp, skb, key, upcall_info),
+
+       TP_STRUCT__entry(
+               __field(        void *,         dpaddr                  )
+               __string(       dp_name,        ovs_dp_name(dp)         )
+               __string(       dev_name,       skb->dev->name          )
+               __field(        void *,         skbaddr                 )
+               __field(        unsigned int,   len                     )
+               __field(        unsigned int,   data_len                )
+               __field(        unsigned int,   truesize                )
+               __field(        u8,             nr_frags                )
+               __field(        u16,            gso_size                )
+               __field(        u16,            gso_type                )
+               __field(        u32,            ovs_flow_hash           )
+               __field(        u32,            recirc_id               )
+               __field(        const void *,   keyaddr                 )
+               __field(        u16,            key_eth_type            )
+               __field(        u8,             key_ct_state            )
+               __field(        u8,             key_ct_orig_proto       )
+               __field(        u16,            key_ct_zone             )
+               __field(        unsigned int,   flow_key_valid          )
+               __field(        u8,             upcall_cmd              )
+               __field(        u32,            upcall_port             )
+               __field(        u16,            upcall_mru              )
+       ),
+
+       TP_fast_assign(
+               __entry->dpaddr = dp;
+               __assign_str(dp_name, ovs_dp_name(dp));
+               __assign_str(dev_name, skb->dev->name);
+               __entry->skbaddr = skb;
+               __entry->len = skb->len;
+               __entry->data_len = skb->data_len;
+               __entry->truesize = skb->truesize;
+               __entry->nr_frags = skb_shinfo(skb)->nr_frags;
+               __entry->gso_size = skb_shinfo(skb)->gso_size;
+               __entry->gso_type = skb_shinfo(skb)->gso_type;
+               __entry->ovs_flow_hash = key->ovs_flow_hash;
+               __entry->recirc_id = key->recirc_id;
+               __entry->keyaddr = key;
+               __entry->key_eth_type = key->eth.type;
+               __entry->key_ct_state = key->ct_state;
+               __entry->key_ct_orig_proto = key->ct_orig_proto;
+               __entry->key_ct_zone = key->ct_zone;
+               __entry->flow_key_valid = !(key->mac_proto & SW_FLOW_KEY_INVALID);
+               __entry->upcall_cmd = upcall_info->cmd;
+               __entry->upcall_port = upcall_info->portid;
+               __entry->upcall_mru = upcall_info->mru;
+       ),
+
+       TP_printk("dpaddr=%p dp_name=%s dev=%s skbaddr=%p len=%u data_len=%u truesize=%u nr_frags=%d gso_size=%d gso_type=%#x ovs_flow_hash=0x%08x recirc_id=0x%08x keyaddr=%p eth_type=0x%04x ct_state=%02x ct_orig_proto=%02x ct_zone=%04x flow_key_valid=%d upcall_cmd=%u upcall_port=%u upcall_mru=%u",
+                 __entry->dpaddr, __get_str(dp_name), __get_str(dev_name),
+                 __entry->skbaddr, __entry->len, __entry->data_len,
+                 __entry->truesize, __entry->nr_frags, __entry->gso_size,
+                 __entry->gso_type, __entry->ovs_flow_hash,
+                 __entry->recirc_id, __entry->keyaddr, __entry->key_eth_type,
+                 __entry->key_ct_state, __entry->key_ct_orig_proto,
+                 __entry->key_ct_zone,
+                 __entry->flow_key_valid,
+                 __entry->upcall_cmd, __entry->upcall_port,
+                 __entry->upcall_mru)
+);
+
+#endif /* _TRACE_OPENVSWITCH_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE openvswitch_trace
+#include <trace/define_trace.h>
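
[The callers added in actions.c and datapath.c wrap each tracepoint in a trace_..._enabled() test because marshalling the arguments (string copies, skb_shinfo() dereferences) costs cycles even when no one is listening; the generated enabled-check compiles down to a static branch. The same guard pattern in plain C, with an ordinary flag standing in for the static key:]

    #include <stdbool.h>
    #include <stdio.h>

    static bool trace_enabled;      /* stand-in for the tracepoint static key */

    static void trace_upcall(const char *dev, unsigned int len)
    {
            fprintf(stderr, "upcall dev=%s len=%u\n", dev, len);
    }

    static const char *expensive_dev_name(void)
    {
            /* imagine string formatting / pointer chasing here */
            return "ovs-system";
    }

    static void dp_upcall(unsigned int len)
    {
            /* Only marshal the arguments when someone is listening. */
            if (trace_enabled)
                    trace_upcall(expensive_dev_name(), len);
    }

    int main(void)
    {
            dp_upcall(64);                  /* silent */
            trace_enabled = true;
            dp_upcall(128);                 /* traced */
            return 0;
    }

[Once the module is loaded, these events should surface under tracefs following the usual TRACE_SYSTEM layout, i.e. events/openvswitch/ovs_dp_upcall and events/openvswitch/ovs_do_execute_action.]
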
index 71dd6b9..77b0cda 100644 (file)
@@ -2683,7 +2683,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
        }
        if (likely(saddr == NULL)) {
                dev     = packet_cached_dev_get(po);
-               proto   = po->num;
+               proto   = READ_ONCE(po->num);
        } else {
                err = -EINVAL;
                if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2896,7 +2896,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 
        if (likely(saddr == NULL)) {
                dev     = packet_cached_dev_get(po);
-               proto   = po->num;
+               proto   = READ_ONCE(po->num);
        } else {
                err = -EINVAL;
                if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -3034,10 +3034,13 @@ static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        struct sock *sk = sock->sk;
        struct packet_sock *po = pkt_sk(sk);
 
-       if (po->tx_ring.pg_vec)
+       /* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
+        * tpacket_snd() will redo the check safely.
+        */
+       if (data_race(po->tx_ring.pg_vec))
                return tpacket_snd(po, msg);
-       else
-               return packet_snd(sock, msg, len);
+
+       return packet_snd(sock, msg, len);
 }
 
 /*
@@ -3168,7 +3171,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
                        /* prevents packet_notifier() from calling
                         * register_prot_hook()
                         */
-                       po->num = 0;
+                       WRITE_ONCE(po->num, 0);
                        __unregister_prot_hook(sk, true);
                        rcu_read_lock();
                        dev_curr = po->prot_hook.dev;
@@ -3178,17 +3181,17 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
                }
 
                BUG_ON(po->running);
-               po->num = proto;
+               WRITE_ONCE(po->num, proto);
                po->prot_hook.type = proto;
 
                if (unlikely(unlisted)) {
                        dev_put(dev);
                        po->prot_hook.dev = NULL;
-                       po->ifindex = -1;
+                       WRITE_ONCE(po->ifindex, -1);
                        packet_cached_dev_reset(po);
                } else {
                        po->prot_hook.dev = dev;
-                       po->ifindex = dev ? dev->ifindex : 0;
+                       WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
                        packet_cached_dev_assign(po, dev);
                }
        }
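
[The annotations throughout this file are all one idea: po->num, po->ifindex and tx_ring.pg_vec are written under locks but read in lockless fast paths, so the readers get READ_ONCE()/data_race() (no load tearing, race documented as benign for KCSAN) while the locked writers publish with WRITE_ONCE(). A userspace sketch of the double-checked shape, with C11 relaxed atomics in the READ_ONCE role and hypothetical names:]

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static _Atomic(void *) pg_vec;  /* stand-in for po->tx_ring.pg_vec */

    static bool use_tx_ring(void)
    {
            /* Lockless hint, like data_race(po->tx_ring.pg_vec):
             * cheap, possibly stale, never torn.
             */
            if (!atomic_load_explicit(&pg_vec, memory_order_relaxed))
                    return false;

            /* The slow path re-checks under the lock before trusting it. */
            pthread_mutex_lock(&lock);
            bool ret = atomic_load_explicit(&pg_vec,
                                            memory_order_relaxed) != NULL;
            pthread_mutex_unlock(&lock);
            return ret;
    }

    int main(void)
    {
            int dummy;

            printf("tx ring? %d\n", use_tx_ring());         /* 0: unset */
            /* a real writer would publish under the lock (WRITE_ONCE role) */
            atomic_store_explicit(&pg_vec, &dummy, memory_order_relaxed);
            printf("tx ring? %d\n", use_tx_ring());         /* 1: set */
            return 0;
    }
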
@@ -3502,7 +3505,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
        uaddr->sa_family = AF_PACKET;
        memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
        rcu_read_lock();
-       dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
+       dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
        if (dev)
                strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
        rcu_read_unlock();
@@ -3517,16 +3520,18 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
        struct sock *sk = sock->sk;
        struct packet_sock *po = pkt_sk(sk);
        DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
+       int ifindex;
 
        if (peer)
                return -EOPNOTSUPP;
 
+       ifindex = READ_ONCE(po->ifindex);
        sll->sll_family = AF_PACKET;
-       sll->sll_ifindex = po->ifindex;
-       sll->sll_protocol = po->num;
+       sll->sll_ifindex = ifindex;
+       sll->sll_protocol = READ_ONCE(po->num);
        sll->sll_pkttype = 0;
        rcu_read_lock();
-       dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
+       dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
        if (dev) {
                sll->sll_hatype = dev->type;
                sll->sll_halen = dev->addr_len;
@@ -4102,7 +4107,7 @@ static int packet_notifier(struct notifier_block *this,
                                }
                                if (msg == NETDEV_UNREGISTER) {
                                        packet_cached_dev_reset(po);
-                                       po->ifindex = -1;
+                                       WRITE_ONCE(po->ifindex, -1);
                                        if (po->prot_hook.dev)
                                                dev_put(po->prot_hook.dev);
                                        po->prot_hook.dev = NULL;
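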
@@ -4408,7 +4413,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
        was_running = po->running;
        num = po->num;
        if (was_running) {
-               po->num = 0;
+               WRITE_ONCE(po->num, 0);
                __unregister_prot_hook(sk, false);
        }
        spin_unlock(&po->bind_lock);
@@ -4443,7 +4448,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 
        spin_lock(&po->bind_lock);
        if (was_running) {
-               po->num = num;
+               WRITE_ONCE(po->num, num);
                register_prot_hook(sk);
        }
        spin_unlock(&po->bind_lock);
@@ -4613,8 +4618,8 @@ static int packet_seq_show(struct seq_file *seq, void *v)
                           s,
                           refcount_read(&s->sk_refcnt),
                           s->sk_type,
-                          ntohs(po->num),
-                          po->ifindex,
+                          ntohs(READ_ONCE(po->num)),
+                          READ_ONCE(po->ifindex),
                           po->running,
                           atomic_read(&s->sk_rmem_alloc),
                           from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
index c0477be..f2efaa4 100644 (file)
@@ -436,7 +436,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
        struct qrtr_sock *ipc;
        struct sk_buff *skb;
        struct qrtr_cb *cb;
-       unsigned int size;
+       size_t size;
        unsigned int ver;
        size_t hdrlen;
 
index 4db109f..5b426dc 100644 (file)
@@ -714,7 +714,7 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 
                if (rds_cmsg_recv(inc, msg, rs)) {
                        ret = -EFAULT;
-                       goto out;
+                       break;
                }
                rds_recvmsg_zcookie(rs, msg);
 
index 18edd9a..a656baa 100644 (file)
@@ -904,14 +904,19 @@ static int tcf_ct_act_nat(struct sk_buff *skb,
        }
 
        err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
-       if (err == NF_ACCEPT &&
-           ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
-               if (maniptype == NF_NAT_MANIP_SRC)
-                       maniptype = NF_NAT_MANIP_DST;
-               else
-                       maniptype = NF_NAT_MANIP_SRC;
-
-               err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
+       if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
+               if (ct->status & IPS_SRC_NAT) {
+                       if (maniptype == NF_NAT_MANIP_SRC)
+                               maniptype = NF_NAT_MANIP_DST;
+                       else
+                               maniptype = NF_NAT_MANIP_SRC;
+
+                       err = ct_nat_execute(skb, ct, ctinfo, range,
+                                            maniptype);
+               } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
+                       err = ct_nat_execute(skb, ct, ctinfo, NULL,
+                                            NF_NAT_MANIP_SRC);
+               }
        }
        return err;
 #else
index 2e704c7..d7869a9 100644 (file)
@@ -1531,13 +1531,14 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
                                                       &mask->basic.n_proto,
                                                       TCA_FLOWER_UNSPEC,
                                                       sizeof(key->basic.n_proto));
-                                       mask->basic.n_proto = cpu_to_be16(0);
                                } else {
                                        key->basic.n_proto = ethertype;
+                                       mask->basic.n_proto = cpu_to_be16(~0);
                                }
                        }
                } else {
                        key->basic.n_proto = ethertype;
+                       mask->basic.n_proto = cpu_to_be16(~0);
                }
        }
 
index 7d37638..9515428 100644 (file)
@@ -943,7 +943,7 @@ static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
        }
 
        tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
-       if (!tcph)
+       if (!tcph || tcph->doff < 5)
                return NULL;
 
        return skb_header_pointer(skb, offset,
@@ -967,6 +967,8 @@ static const void *cake_get_tcpopt(const struct tcphdr *tcph,
                        length--;
                        continue;
                }
+               if (length < 2)
+                       break;
                opsize = *ptr++;
                if (opsize < 2 || opsize > length)
                        break;
@@ -1104,6 +1106,8 @@ static bool cake_tcph_may_drop(const struct tcphdr *tcph,
                        length--;
                        continue;
                }
+               if (length < 2)
+                       break;
                opsize = *ptr++;
                if (opsize < 2 || opsize > length)
                        break;
@@ -2338,7 +2342,7 @@ static int cake_config_precedence(struct Qdisc *sch)
 
 /*     List of known Diffserv codepoints:
  *
- *     Least Effort (CS1)
+ *     Least Effort (CS1, LE)
  *     Best Effort (CS0)
  *     Max Reliability & LLT "Lo" (TOS1)
  *     Max Throughput (TOS2)
@@ -2360,7 +2364,7 @@ static int cake_config_precedence(struct Qdisc *sch)
  *     Total 25 codepoints.
  */
 
-/*     List of traffic classes in RFC 4594:
+/*     List of traffic classes in RFC 4594, updated by RFC 8622:
  *             (roughly descending order of contended priority)
  *             (roughly ascending order of uncontended throughput)
  *
@@ -2375,7 +2379,7 @@ static int cake_config_precedence(struct Qdisc *sch)
  *     Ops, Admin, Management (CS2,TOS1) - eg. ssh
  *     Standard Service (CS0 & unrecognised codepoints)
  *     High Throughput Data (AF1x,TOS2)  - eg. web traffic
- *     Low Priority Data (CS1)           - eg. BitTorrent
+ *     Low Priority Data (CS1,LE)        - eg. BitTorrent
 
  *     Total 12 traffic classes.
  */
@@ -2391,7 +2395,7 @@ static int cake_config_diffserv8(struct Qdisc *sch)
  *             Video Streaming          (AF4x, AF3x, CS3)
  *             Bog Standard             (CS0 etc.)
  *             High Throughput          (AF1x, TOS2)
- *             Background Traffic       (CS1)
+ *             Background Traffic       (CS1, LE)
  *
  *             Total 8 traffic classes.
  */
@@ -2435,7 +2439,7 @@ static int cake_config_diffserv4(struct Qdisc *sch)
  *         Latency Sensitive  (CS7, CS6, EF, VA, CS5, CS4)
  *         Streaming Media    (AF4x, AF3x, CS3, AF2x, TOS4, CS2, TOS1)
  *         Best Effort        (CS0, AF1x, TOS2, and those not specified)
- *         Background Traffic (CS1)
+ *         Background Traffic (CS1, LE)
  *
  *             Total 4 traffic classes.
  */
@@ -2473,7 +2477,7 @@ static int cake_config_diffserv4(struct Qdisc *sch)
 static int cake_config_diffserv3(struct Qdisc *sch)
 {
 /*  Simplified Diffserv structure with 3 tins.
- *             Low Priority            (CS1)
+ *             Low Priority            (CS1, LE)
  *             Best Effort
  *             Latency Sensitive       (TOS4, VA, EF, CS6, CS7)
  */
index e9c0afc..d9ac60f 100644 (file)
@@ -52,6 +52,8 @@ static void qdisc_maybe_clear_missed(struct Qdisc *q,
         */
        if (!netif_xmit_frozen_or_stopped(txq))
                set_bit(__QDISC_STATE_MISSED, &q->state);
+       else
+               set_bit(__QDISC_STATE_DRAINING, &q->state);
 }
 
 /* Main transmission queue. */
@@ -164,9 +166,13 @@ static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 
                skb = next;
        }
-       if (lock)
+
+       if (lock) {
                spin_unlock(lock);
-       __netif_schedule(q);
+               set_bit(__QDISC_STATE_MISSED, &q->state);
+       } else {
+               __netif_schedule(q);
+       }
 }
 
 static void try_bulk_dequeue_skb(struct Qdisc *q,
@@ -409,7 +415,11 @@ void __qdisc_run(struct Qdisc *q)
        while (qdisc_restart(q, &packets)) {
                quota -= packets;
                if (quota <= 0) {
-                       __netif_schedule(q);
+                       if (q->flags & TCQ_F_NOLOCK)
+                               set_bit(__QDISC_STATE_MISSED, &q->state);
+                       else
+                               __netif_schedule(q);
+
                        break;
                }
        }
@@ -698,13 +708,14 @@ retry:
        if (likely(skb)) {
                qdisc_update_stats_at_dequeue(qdisc, skb);
        } else if (need_retry &&
-                  test_bit(__QDISC_STATE_MISSED, &qdisc->state)) {
+                  READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY) {
                /* Delay clearing the STATE_MISSED here to reduce
                 * the overhead of the second spin_trylock() in
                 * qdisc_run_begin() and __netif_schedule() calling
                 * in qdisc_run_end().
                 */
                clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+               clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
 
                /* Make sure dequeuing happens after clearing
                 * STATE_MISSED.
@@ -714,8 +725,6 @@ retry:
                need_retry = false;
 
                goto retry;
-       } else {
-               WRITE_ONCE(qdisc->empty, true);
        }
 
        return skb;
@@ -916,7 +925,6 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev_queue = dev_queue;
-       sch->empty = true;
        dev_hold(dev);
        refcount_set(&sch->refcnt, 1);
 
@@ -1222,6 +1230,7 @@ static void dev_reset_queue(struct net_device *dev,
        spin_unlock_bh(qdisc_lock(qdisc));
        if (nolock) {
                clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+               clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
                spin_unlock_bh(&qdisc->seqlock);
        }
 }
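
The dequeue changes above replace the single MISSED test with READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY, so one plain load observes both the MISSED bit and the new DRAINING bit (and the sch->empty field can go away). A hedged sketch of how such a combined mask would be composed; the bit positions here are illustrative, not copied from the headers:

#define BIT(n)                  (1UL << (n))

#define __QDISC_STATE_MISSED    0
#define __QDISC_STATE_DRAINING  1

#define QDISC_STATE_MISSED      BIT(__QDISC_STATE_MISSED)
#define QDISC_STATE_DRAINING    BIT(__QDISC_STATE_DRAINING)
#define QDISC_STATE_NON_EMPTY   (QDISC_STATE_MISSED | QDISC_STATE_DRAINING)
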
index 336df4b..be29da0 100644 (file)
@@ -98,6 +98,7 @@ static struct sctp_association *sctp_association_init(
         * sock configured value.
         */
        asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
+       asoc->probe_interval = msecs_to_jiffies(sp->probe_interval);
 
        asoc->encap_port = sp->encap_port;
 
@@ -625,6 +626,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
         * association configured value.
         */
        peer->hbinterval = asoc->hbinterval;
+       peer->probe_interval = asoc->probe_interval;
 
        peer->encap_port = asoc->encap_port;
 
@@ -714,6 +716,8 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
                return NULL;
        }
 
+       sctp_transport_pl_reset(peer);
+
        /* Attach the remote transport to our asoc.  */
        list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
        asoc->peer.transport_count++;
@@ -812,6 +816,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
                        spc_state = SCTP_ADDR_CONFIRMED;
 
                transport->state = SCTP_ACTIVE;
+               sctp_transport_pl_reset(transport);
                break;
 
        case SCTP_TRANSPORT_DOWN:
@@ -821,6 +826,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
                 */
                if (transport->state != SCTP_UNCONFIRMED) {
                        transport->state = SCTP_INACTIVE;
+                       sctp_transport_pl_reset(transport);
                        spc_state = SCTP_ADDR_UNREACHABLE;
                } else {
                        sctp_transport_dst_release(transport);
index c4d9c7f..ccd773e 100644 (file)
@@ -154,6 +154,7 @@ static const char *const sctp_timer_tbl[] = {
        "TIMEOUT_T5_SHUTDOWN_GUARD",
        "TIMEOUT_HEARTBEAT",
        "TIMEOUT_RECONF",
+       "TIMEOUT_PROBE",
        "TIMEOUT_SACK",
        "TIMEOUT_AUTOCLOSE",
 };
index d508f6f..fe6429c 100644 (file)
@@ -385,7 +385,9 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
                           struct sctp_transport *t, __u32 pmtu)
 {
-       if (!t || (t->pathmtu <= pmtu))
+       if (!t ||
+           (t->pathmtu <= pmtu &&
+            t->pl.probe_size + sctp_transport_pl_hlen(t) <= pmtu))
                return;
 
        if (sock_owned_by_user(sk)) {
@@ -554,6 +556,49 @@ void sctp_err_finish(struct sock *sk, struct sctp_transport *t)
        sctp_transport_put(t);
 }
 
+static void sctp_v4_err_handle(struct sctp_transport *t, struct sk_buff *skb,
+                              __u8 type, __u8 code, __u32 info)
+{
+       struct sctp_association *asoc = t->asoc;
+       struct sock *sk = asoc->base.sk;
+       int err = 0;
+
+       switch (type) {
+       case ICMP_PARAMETERPROB:
+               err = EPROTO;
+               break;
+       case ICMP_DEST_UNREACH:
+               if (code > NR_ICMP_UNREACH)
+                       return;
+               if (code == ICMP_FRAG_NEEDED) {
+                       sctp_icmp_frag_needed(sk, asoc, t, SCTP_TRUNC4(info));
+                       return;
+               }
+               if (code == ICMP_PROT_UNREACH) {
+                       sctp_icmp_proto_unreachable(sk, asoc, t);
+                       return;
+               }
+               err = icmp_err_convert[code].errno;
+               break;
+       case ICMP_TIME_EXCEEDED:
+               if (code == ICMP_EXC_FRAGTIME)
+                       return;
+
+               err = EHOSTUNREACH;
+               break;
+       case ICMP_REDIRECT:
+               sctp_icmp_redirect(sk, t, skb);
+       default:
+               return;
+       }
+       if (!sock_owned_by_user(sk) && inet_sk(sk)->recverr) {
+               sk->sk_err = err;
+               sk->sk_error_report(sk);
+       } else {  /* Only an error on timeout */
+               sk->sk_err_soft = err;
+       }
+}
+
 /*
  * This routine is called by the ICMP module when it gets some
  * sort of error condition.  If err < 0 then the socket should
@@ -572,22 +617,19 @@ void sctp_err_finish(struct sock *sk, struct sctp_transport *t)
 int sctp_v4_err(struct sk_buff *skb, __u32 info)
 {
        const struct iphdr *iph = (const struct iphdr *)skb->data;
-       const int ihlen = iph->ihl * 4;
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
-       struct sock *sk;
-       struct sctp_association *asoc = NULL;
+       struct net *net = dev_net(skb->dev);
        struct sctp_transport *transport;
-       struct inet_sock *inet;
+       struct sctp_association *asoc;
        __u16 saveip, savesctp;
-       int err;
-       struct net *net = dev_net(skb->dev);
+       struct sock *sk;
 
        /* Fix up skb to look at the embedded net header. */
        saveip = skb->network_header;
        savesctp = skb->transport_header;
        skb_reset_network_header(skb);
-       skb_set_transport_header(skb, ihlen);
+       skb_set_transport_header(skb, iph->ihl * 4);
        sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
        /* Put back, the original values. */
        skb->network_header = saveip;
@@ -596,59 +638,41 @@ int sctp_v4_err(struct sk_buff *skb, __u32 info)
                __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
                return -ENOENT;
        }
-       /* Warning:  The sock lock is held.  Remember to call
-        * sctp_err_finish!
-        */
 
-       switch (type) {
-       case ICMP_PARAMETERPROB:
-               err = EPROTO;
-               break;
-       case ICMP_DEST_UNREACH:
-               if (code > NR_ICMP_UNREACH)
-                       goto out_unlock;
+       sctp_v4_err_handle(transport, skb, type, code, info);
+       sctp_err_finish(sk, transport);
 
-               /* PMTU discovery (RFC1191) */
-               if (ICMP_FRAG_NEEDED == code) {
-                       sctp_icmp_frag_needed(sk, asoc, transport,
-                                             SCTP_TRUNC4(info));
-                       goto out_unlock;
-               } else {
-                       if (ICMP_PROT_UNREACH == code) {
-                               sctp_icmp_proto_unreachable(sk, asoc,
-                                                           transport);
-                               goto out_unlock;
-                       }
-               }
-               err = icmp_err_convert[code].errno;
-               break;
-       case ICMP_TIME_EXCEEDED:
-               /* Ignore any time exceeded errors due to fragment reassembly
-                * timeouts.
-                */
-               if (ICMP_EXC_FRAGTIME == code)
-                       goto out_unlock;
+       return 0;
+}
 
-               err = EHOSTUNREACH;
-               break;
-       case ICMP_REDIRECT:
-               sctp_icmp_redirect(sk, transport, skb);
-               /* Fall through to out_unlock. */
-       default:
-               goto out_unlock;
+int sctp_udp_v4_err(struct sock *sk, struct sk_buff *skb)
+{
+       struct net *net = dev_net(skb->dev);
+       struct sctp_association *asoc;
+       struct sctp_transport *t;
+       struct icmphdr *hdr;
+       __u32 info = 0;
+
+       skb->transport_header += sizeof(struct udphdr);
+       sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &t);
+       if (!sk) {
+               __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
+               return -ENOENT;
        }
 
-       inet = inet_sk(sk);
-       if (!sock_owned_by_user(sk) && inet->recverr) {
-               sk->sk_err = err;
-               sk->sk_error_report(sk);
-       } else {  /* Only an error on timeout */
-               sk->sk_err_soft = err;
+       skb->transport_header -= sizeof(struct udphdr);
+       hdr = (struct icmphdr *)(skb_network_header(skb) - sizeof(struct icmphdr));
+       if (hdr->type == ICMP_REDIRECT) {
+               /* can't be handled without the outer iphdr known; leave it to udp_err */
+               sctp_err_finish(sk, t);
+               return 0;
        }
+       if (hdr->type == ICMP_DEST_UNREACH && hdr->code == ICMP_FRAG_NEEDED)
+               info = ntohs(hdr->un.frag.mtu);
+       sctp_v4_err_handle(t, skb, hdr->type, hdr->code, info);
 
-out_unlock:
-       sctp_err_finish(sk, transport);
-       return 0;
+       sctp_err_finish(sk, t);
+       return 1;
 }
 
 /*
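
The pointer arithmetic in sctp_udp_v4_err() recovers the outer ICMP header from an encapsulated error: by the time the encap_err_lookup callback runs, the skb's network header already points at the IP header embedded in the ICMP payload. A hedged layout note, reconstructed from the code rather than quoted from it:

/*
 * ICMP error carrying a UDP-encapsulated SCTP packet:
 *
 *   outer iphdr | icmphdr | inner iphdr | udphdr | sctphdr | ...
 *                         ^ skb_network_header(skb)
 *
 * so the outer icmphdr starts sizeof(struct icmphdr) bytes before
 * the embedded network header; the v6 variant below does the same
 * with struct icmp6hdr.
 */
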
index bd08807..05f81a4 100644 (file)
@@ -122,54 +122,28 @@ static struct notifier_block sctp_inet6addr_notifier = {
        .notifier_call = sctp_inet6addr_event,
 };
 
-/* ICMP error handler. */
-static int sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-                       u8 type, u8 code, int offset, __be32 info)
+static void sctp_v6_err_handle(struct sctp_transport *t, struct sk_buff *skb,
+                              __u8 type, __u8 code, __u32 info)
 {
-       struct inet6_dev *idev;
-       struct sock *sk;
-       struct sctp_association *asoc;
-       struct sctp_transport *transport;
+       struct sctp_association *asoc = t->asoc;
+       struct sock *sk = asoc->base.sk;
        struct ipv6_pinfo *np;
-       __u16 saveip, savesctp;
-       int err, ret = 0;
-       struct net *net = dev_net(skb->dev);
-
-       idev = in6_dev_get(skb->dev);
-
-       /* Fix up skb to look at the embedded net header. */
-       saveip   = skb->network_header;
-       savesctp = skb->transport_header;
-       skb_reset_network_header(skb);
-       skb_set_transport_header(skb, offset);
-       sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &transport);
-       /* Put back, the original pointers. */
-       skb->network_header   = saveip;
-       skb->transport_header = savesctp;
-       if (!sk) {
-               __ICMP6_INC_STATS(net, idev, ICMP6_MIB_INERRORS);
-               ret = -ENOENT;
-               goto out;
-       }
-
-       /* Warning:  The sock lock is held.  Remember to call
-        * sctp_err_finish!
-        */
+       int err = 0;
 
        switch (type) {
        case ICMPV6_PKT_TOOBIG:
                if (ip6_sk_accept_pmtu(sk))
-                       sctp_icmp_frag_needed(sk, asoc, transport, ntohl(info));
-               goto out_unlock;
+                       sctp_icmp_frag_needed(sk, asoc, t, info);
+               return;
        case ICMPV6_PARAMPROB:
                if (ICMPV6_UNK_NEXTHDR == code) {
-                       sctp_icmp_proto_unreachable(sk, asoc, transport);
-                       goto out_unlock;
+                       sctp_icmp_proto_unreachable(sk, asoc, t);
+                       return;
                }
                break;
        case NDISC_REDIRECT:
-               sctp_icmp_redirect(sk, transport, skb);
-               goto out_unlock;
+               sctp_icmp_redirect(sk, t, skb);
+               return;
        default:
                break;
        }
@@ -179,17 +153,69 @@ static int sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        if (!sock_owned_by_user(sk) && np->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
-       } else {  /* Only an error on timeout */
+       } else {
                sk->sk_err_soft = err;
        }
+}
+
+/* ICMP error handler. */
+static int sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+                      u8 type, u8 code, int offset, __be32 info)
+{
+       struct net *net = dev_net(skb->dev);
+       struct sctp_transport *transport;
+       struct sctp_association *asoc;
+       __u16 saveip, savesctp;
+       struct sock *sk;
+
+       /* Fix up skb to look at the embedded net header. */
+       saveip   = skb->network_header;
+       savesctp = skb->transport_header;
+       skb_reset_network_header(skb);
+       skb_set_transport_header(skb, offset);
+       sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &transport);
+       /* Put back, the original pointers. */
+       skb->network_header   = saveip;
+       skb->transport_header = savesctp;
+       if (!sk) {
+               __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
+               return -ENOENT;
+       }
 
-out_unlock:
+       sctp_v6_err_handle(transport, skb, type, code, ntohl(info));
        sctp_err_finish(sk, transport);
-out:
-       if (likely(idev != NULL))
-               in6_dev_put(idev);
 
-       return ret;
+       return 0;
+}
+
+int sctp_udp_v6_err(struct sock *sk, struct sk_buff *skb)
+{
+       struct net *net = dev_net(skb->dev);
+       struct sctp_association *asoc;
+       struct sctp_transport *t;
+       struct icmp6hdr *hdr;
+       __u32 info = 0;
+
+       skb->transport_header += sizeof(struct udphdr);
+       sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &t);
+       if (!sk) {
+               __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
+               return -ENOENT;
+       }
+
+       skb->transport_header -= sizeof(struct udphdr);
+       hdr = (struct icmp6hdr *)(skb_network_header(skb) - sizeof(struct icmp6hdr));
+       if (hdr->icmp6_type == NDISC_REDIRECT) {
+               /* can't be handled without the outer ip6hdr known; leave it to udpv6_err */
+               sctp_err_finish(sk, t);
+               return 0;
+       }
+       if (hdr->icmp6_type == ICMPV6_PKT_TOOBIG)
+               info = ntohl(hdr->icmp6_mtu);
+       sctp_v6_err_handle(t, skb, hdr->icmp6_type, hdr->icmp6_code, info);
+
+       sctp_err_finish(sk, t);
+       return 1;
 }
 
 static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *t)
index a6aa17d..9032ce6 100644 (file)
@@ -103,7 +103,8 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
                sctp_transport_route(tp, NULL, sp);
                if (asoc->param_flags & SPP_PMTUD_ENABLE)
                        sctp_assoc_sync_pmtu(asoc);
-       } else if (!sctp_transport_pmtu_check(tp)) {
+       } else if (!sctp_transport_pl_enabled(tp) &&
+                  !sctp_transport_pmtu_check(tp)) {
                if (asoc->param_flags & SPP_PMTUD_ENABLE)
                        sctp_assoc_sync_pmtu(asoc);
        }
@@ -211,6 +212,30 @@ enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
        return retval;
 }
 
+/* Try to bundle a pad chunk into a packet with a heartbeat chunk for a PLPMTUD probe */
+static enum sctp_xmit sctp_packet_bundle_pad(struct sctp_packet *pkt, struct sctp_chunk *chunk)
+{
+       struct sctp_transport *t = pkt->transport;
+       struct sctp_chunk *pad;
+       int overhead = 0;
+
+       if (!chunk->pmtu_probe)
+               return SCTP_XMIT_OK;
+
+       /* calculate the Padding Data size for the pad chunk */
+       overhead += sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
+       overhead += sizeof(struct sctp_sender_hb_info) + sizeof(struct sctp_pad_chunk);
+       pad = sctp_make_pad(t->asoc, t->pl.probe_size - overhead);
+       if (!pad)
+               return SCTP_XMIT_DELAY;
+
+       list_add_tail(&pad->list, &pkt->chunk_list);
+       pkt->size += SCTP_PAD4(ntohs(pad->chunk_hdr->length));
+       chunk->transport = t;
+
+       return SCTP_XMIT_OK;
+}
+
 /* Try to bundle an auth chunk into the packet. */
 static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt,
                                              struct sctp_chunk *chunk)
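
sctp_packet_bundle_pad() above sizes the PAD chunk so that HEARTBEAT plus padding fills exactly probe_size bytes of SCTP payload. A hedged restatement of the overhead arithmetic; the helper is illustrative and assumes the kernel's sctp headers for the struct sizes:

/* Padding Data length = probe size minus the common header, the
 * HEARTBEAT chunk (chunk header + sender_hb_info) and the PAD
 * chunk header itself. */
static inline int plpmtud_pad_len(int probe_size)
{
        int overhead = sizeof(struct sctphdr) +
                       sizeof(struct sctp_chunkhdr) +
                       sizeof(struct sctp_sender_hb_info) +
                       sizeof(struct sctp_pad_chunk);

        return probe_size - overhead;
}
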
@@ -382,6 +407,10 @@ enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
                goto finish;
 
        retval = __sctp_packet_append_chunk(packet, chunk);
+       if (retval != SCTP_XMIT_OK)
+               goto finish;
+
+       retval = sctp_packet_bundle_pad(packet, chunk);
 
 finish:
        return retval;
@@ -553,7 +582,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
        sk = chunk->skb->sk;
 
        /* check gso */
-       if (packet->size > tp->pathmtu && !packet->ipfragok) {
+       if (packet->size > tp->pathmtu && !packet->ipfragok && !chunk->pmtu_probe) {
                if (!sk_can_gso(sk)) {
                        pr_err_once("Trying to GSO but underlying device doesn't support it.");
                        goto out;
index 5cb1aa5..ff47091 100644 (file)
@@ -769,7 +769,11 @@ static int sctp_packet_singleton(struct sctp_transport *transport,
 
        sctp_packet_init(&singleton, transport, sport, dport);
        sctp_packet_config(&singleton, vtag, 0);
-       sctp_packet_append_chunk(&singleton, chunk);
+       if (sctp_packet_append_chunk(&singleton, chunk) != SCTP_XMIT_OK) {
+               list_del_init(&chunk->list);
+               sctp_chunk_free(chunk);
+               return -ENOMEM;
+       }
        return sctp_packet_transmit(&singleton, gfp);
 }
 
@@ -929,8 +933,13 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
                        one_packet = 1;
                        fallthrough;
 
-               case SCTP_CID_SACK:
                case SCTP_CID_HEARTBEAT:
+                       if (chunk->pmtu_probe) {
+                               sctp_packet_singleton(ctx->transport, chunk, ctx->gfp);
+                               break;
+                       }
+                       fallthrough;
+               case SCTP_CID_SACK:
                case SCTP_CID_SHUTDOWN:
                case SCTP_CID_ECN_ECNE:
                case SCTP_CID_ASCONF:
index baa4e77..bc5db0b 100644 (file)
@@ -850,23 +850,6 @@ static int sctp_udp_rcv(struct sock *sk, struct sk_buff *skb)
        return 0;
 }
 
-static int sctp_udp_err_lookup(struct sock *sk, struct sk_buff *skb)
-{
-       struct sctp_association *asoc;
-       struct sctp_transport *t;
-       int family;
-
-       skb->transport_header += sizeof(struct udphdr);
-       family = (ip_hdr(skb)->version == 4) ? AF_INET : AF_INET6;
-       sk = sctp_err_lookup(dev_net(skb->dev), family, skb, sctp_hdr(skb),
-                            &asoc, &t);
-       if (!sk)
-               return -ENOENT;
-
-       sctp_err_finish(sk, t);
-       return 0;
-}
-
 int sctp_udp_sock_start(struct net *net)
 {
        struct udp_tunnel_sock_cfg tuncfg = {NULL};
@@ -885,7 +868,7 @@ int sctp_udp_sock_start(struct net *net)
 
        tuncfg.encap_type = 1;
        tuncfg.encap_rcv = sctp_udp_rcv;
-       tuncfg.encap_err_lookup = sctp_udp_err_lookup;
+       tuncfg.encap_err_lookup = sctp_udp_v4_err;
        setup_udp_tunnel_sock(net, sock, &tuncfg);
        net->sctp.udp4_sock = sock->sk;
 
@@ -907,7 +890,7 @@ int sctp_udp_sock_start(struct net *net)
 
        tuncfg.encap_type = 1;
        tuncfg.encap_rcv = sctp_udp_rcv;
-       tuncfg.encap_err_lookup = sctp_udp_err_lookup;
+       tuncfg.encap_err_lookup = sctp_udp_v6_err;
        setup_udp_tunnel_sock(net, sock, &tuncfg);
        net->sctp.udp6_sock = sock->sk;
 #endif
index 5b44d22..b0eaa93 100644 (file)
@@ -1160,7 +1160,8 @@ nodata:
 
 /* Make a HEARTBEAT chunk.  */
 struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
-                                      const struct sctp_transport *transport)
+                                      const struct sctp_transport *transport,
+                                      __u32 probe_size)
 {
        struct sctp_sender_hb_info hbinfo;
        struct sctp_chunk *retval;
@@ -1176,6 +1177,7 @@ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
        hbinfo.daddr = transport->ipaddr;
        hbinfo.sent_at = jiffies;
        hbinfo.hb_nonce = transport->hb_nonce;
+       hbinfo.probe_size = probe_size;
 
        /* Cast away the 'const', as this is just telling the chunk
         * what transport it belongs to.
@@ -1183,6 +1185,7 @@ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
        retval->transport = (struct sctp_transport *) transport;
        retval->subh.hbs_hdr = sctp_addto_chunk(retval, sizeof(hbinfo),
                                                &hbinfo);
+       retval->pmtu_probe = !!probe_size;
 
 nodata:
        return retval;
@@ -1218,6 +1221,32 @@ nodata:
        return retval;
 }
 
+/* RFC4820 3. Padding Chunk (PAD)
+ *  0                   1                   2                   3
+ *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Type = 0x84   |   Flags=0     |             Length            |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                                                               |
+ * \                         Padding Data                          /
+ * /                                                               \
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct sctp_chunk *sctp_make_pad(const struct sctp_association *asoc, int len)
+{
+       struct sctp_chunk *retval;
+
+       retval = sctp_make_control(asoc, SCTP_CID_PAD, 0, len, GFP_ATOMIC);
+       if (!retval)
+               return NULL;
+
+       skb_put_zero(retval->skb, len);
+       retval->chunk_hdr->length = htons(ntohs(retval->chunk_hdr->length) + len);
+       retval->chunk_end = skb_tail_pointer(retval->skb);
+
+       return retval;
+}
+
 /* Create an Operation Error chunk with the specified space reserved.
  * This routine can be used for containing multiple causes in the chunk.
  */
index ce15d59..b3815b5 100644 (file)
@@ -471,6 +471,38 @@ out_unlock:
        sctp_transport_put(transport);
 }
 
+/* Handle the timeout of the probe timer. */
+void sctp_generate_probe_event(struct timer_list *t)
+{
+       struct sctp_transport *transport = from_timer(transport, t, probe_timer);
+       struct sctp_association *asoc = transport->asoc;
+       struct sock *sk = asoc->base.sk;
+       struct net *net = sock_net(sk);
+       int error = 0;
+
+       bh_lock_sock(sk);
+       if (sock_owned_by_user(sk)) {
+               pr_debug("%s: sock is busy\n", __func__);
+
+               /* Try again later.  */
+               if (!mod_timer(&transport->probe_timer, jiffies + (HZ / 20)))
+                       sctp_transport_hold(transport);
+               goto out_unlock;
+       }
+
+       error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
+                          SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_PROBE),
+                          asoc->state, asoc->ep, asoc,
+                          transport, GFP_ATOMIC);
+
+       if (error)
+               sk->sk_err = -error;
+
+out_unlock:
+       bh_unlock_sock(sk);
+       sctp_transport_put(transport);
+}
+
 /* Inject a SACK Timeout event into the state machine.  */
 static void sctp_generate_sack_event(struct timer_list *t)
 {
@@ -1641,6 +1673,11 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
                        sctp_cmd_hb_timers_stop(commands, asoc);
                        break;
 
+               case SCTP_CMD_PROBE_TIMER_UPDATE:
+                       t = cmd->obj.transport;
+                       sctp_transport_reset_probe_timer(t);
+                       break;
+
                case SCTP_CMD_REPORT_ERROR:
                        error = cmd->obj.error;
                        break;
index 4f30388..09a8f23 100644 (file)
@@ -1004,7 +1004,7 @@ static enum sctp_disposition sctp_sf_heartbeat(
        struct sctp_chunk *reply;
 
        /* Send a heartbeat to our peer.  */
-       reply = sctp_make_heartbeat(asoc, transport);
+       reply = sctp_make_heartbeat(asoc, transport, 0);
        if (!reply)
                return SCTP_DISPOSITION_NOMEM;
 
@@ -1095,6 +1095,32 @@ enum sctp_disposition sctp_sf_send_reconf(struct net *net,
        return SCTP_DISPOSITION_CONSUME;
 }
 
+/* Send a HEARTBEAT chunk with padding for PLPMTUD.  */
+enum sctp_disposition sctp_sf_send_probe(struct net *net,
+                                        const struct sctp_endpoint *ep,
+                                        const struct sctp_association *asoc,
+                                        const union sctp_subtype type,
+                                        void *arg,
+                                        struct sctp_cmd_seq *commands)
+{
+       struct sctp_transport *transport = (struct sctp_transport *)arg;
+       struct sctp_chunk *reply;
+
+       if (!sctp_transport_pl_enabled(transport))
+               return SCTP_DISPOSITION_CONSUME;
+
+       sctp_transport_pl_send(transport);
+
+       reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size);
+       if (!reply)
+               return SCTP_DISPOSITION_NOMEM;
+       sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+       sctp_add_cmd_sf(commands, SCTP_CMD_PROBE_TIMER_UPDATE,
+                       SCTP_TRANSPORT(transport));
+
+       return SCTP_DISPOSITION_CONSUME;
+}
+
 /*
  * Process a heartbeat request.
  *
@@ -1243,6 +1269,18 @@ enum sctp_disposition sctp_sf_backbeat_8_3(struct net *net,
        if (hbinfo->hb_nonce != link->hb_nonce)
                return SCTP_DISPOSITION_DISCARD;
 
+       if (hbinfo->probe_size) {
+               if (hbinfo->probe_size != link->pl.probe_size ||
+                   !sctp_transport_pl_enabled(link))
+                       return SCTP_DISPOSITION_DISCARD;
+
+               sctp_transport_pl_recv(link);
+               if (link->pl.state == SCTP_PL_COMPLETE)
+                       return SCTP_DISPOSITION_CONSUME;
+
+               return sctp_sf_send_probe(net, ep, asoc, type, link, commands);
+       }
+
        max_interval = link->hbinterval + link->rto;
 
        /* Check if the timestamp looks valid.  */
index 88ea87f..1816a44 100644 (file)
@@ -527,6 +527,26 @@ auth_chunk_event_table[SCTP_NUM_AUTH_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = {
 }; /*state_fn_t auth_chunk_event_table[][] */
 
 static const struct sctp_sm_table_entry
+pad_chunk_event_table[SCTP_STATE_NUM_STATES] = {
+       /* SCTP_STATE_CLOSED */
+       TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+       /* SCTP_STATE_COOKIE_WAIT */
+       TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+       /* SCTP_STATE_COOKIE_ECHOED */
+       TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+       /* SCTP_STATE_ESTABLISHED */
+       TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+       /* SCTP_STATE_SHUTDOWN_PENDING */
+       TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+       /* SCTP_STATE_SHUTDOWN_SENT */
+       TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+       /* SCTP_STATE_SHUTDOWN_RECEIVED */
+       TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+       /* SCTP_STATE_SHUTDOWN_ACK_SENT */
+       TYPE_SCTP_FUNC(sctp_sf_discard_chunk),
+};     /* chunk pad */
+
+static const struct sctp_sm_table_entry
 chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
        /* SCTP_STATE_CLOSED */
        TYPE_SCTP_FUNC(sctp_sf_ootb),
@@ -947,6 +967,25 @@ other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES] = {
        TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 }
 
+#define TYPE_SCTP_EVENT_TIMEOUT_PROBE { \
+       /* SCTP_STATE_CLOSED */ \
+       TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+       /* SCTP_STATE_COOKIE_WAIT */ \
+       TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+       /* SCTP_STATE_COOKIE_ECHOED */ \
+       TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+       /* SCTP_STATE_ESTABLISHED */ \
+       TYPE_SCTP_FUNC(sctp_sf_send_probe), \
+       /* SCTP_STATE_SHUTDOWN_PENDING */ \
+       TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+       /* SCTP_STATE_SHUTDOWN_SENT */ \
+       TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+       /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+       TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+       /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+       TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+}
+
 static const struct sctp_sm_table_entry
 timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES] = {
        TYPE_SCTP_EVENT_TIMEOUT_NONE,
@@ -958,6 +997,7 @@ timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES] = {
        TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD,
        TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT,
        TYPE_SCTP_EVENT_TIMEOUT_RECONF,
+       TYPE_SCTP_EVENT_TIMEOUT_PROBE,
        TYPE_SCTP_EVENT_TIMEOUT_SACK,
        TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE,
 };
@@ -992,6 +1032,9 @@ static const struct sctp_sm_table_entry *sctp_chunk_event_lookup(
 
        case SCTP_CID_AUTH:
                return &auth_chunk_event_table[0][state];
+
+       case SCTP_CID_PAD:
+               return &pad_chunk_event_table[state];
        }
 
        return &chunk_event_table_unknown[state];
index a79d193..e64e01f 100644 (file)
@@ -2496,6 +2496,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
                                sctp_transport_pmtu(trans, sctp_opt2sk(sp));
                                sctp_assoc_sync_pmtu(asoc);
                        }
+                       sctp_transport_pl_reset(trans);
                } else if (asoc) {
                        asoc->param_flags =
                                (asoc->param_flags & ~SPP_PMTUD) | pmtud_change;
@@ -4481,6 +4482,61 @@ static int sctp_setsockopt_encap_port(struct sock *sk,
        return 0;
 }
 
+static int sctp_setsockopt_probe_interval(struct sock *sk,
+                                         struct sctp_probeinterval *params,
+                                         unsigned int optlen)
+{
+       struct sctp_association *asoc;
+       struct sctp_transport *t;
+       __u32 probe_interval;
+
+       if (optlen != sizeof(*params))
+               return -EINVAL;
+
+       probe_interval = params->spi_interval;
+       if (probe_interval && probe_interval < SCTP_PROBE_TIMER_MIN)
+               return -EINVAL;
+
+       /* If an address other than INADDR_ANY is specified, and
+        * no transport is found, then the request is invalid.
+        */
+       if (!sctp_is_any(sk, (union sctp_addr *)&params->spi_address)) {
+               t = sctp_addr_id2transport(sk, &params->spi_address,
+                                          params->spi_assoc_id);
+               if (!t)
+                       return -EINVAL;
+
+               t->probe_interval = msecs_to_jiffies(probe_interval);
+               sctp_transport_pl_reset(t);
+               return 0;
+       }
+
+       /* Get association, if assoc_id != SCTP_FUTURE_ASSOC and the
+        * socket is a one to many style socket, and an association
+        * was not found, then the id was invalid.
+        */
+       asoc = sctp_id2assoc(sk, params->spi_assoc_id);
+       if (!asoc && params->spi_assoc_id != SCTP_FUTURE_ASSOC &&
+           sctp_style(sk, UDP))
+               return -EINVAL;
+
+       /* If changes are for association, also apply probe_interval to
+        * each transport.
+        */
+       if (asoc) {
+               list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
+                       t->probe_interval = msecs_to_jiffies(probe_interval);
+                       sctp_transport_pl_reset(t);
+               }
+
+               asoc->probe_interval = msecs_to_jiffies(probe_interval);
+               return 0;
+       }
+
+       sctp_sk(sk)->probe_interval = probe_interval;
+       return 0;
+}
+
 /* API 6.2 setsockopt(), getsockopt()
  *
  * Applications use setsockopt() and getsockopt() to set or retrieve
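
A hedged userspace sketch of the new socket option; it assumes struct sctp_probeinterval, SCTP_PLPMTUD_PROBE_INTERVAL and SCTP_FUTURE_ASSOC are exported through the uapi headers this series updates, and the 60000 ms interval is an arbitrary example value:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Enable PLPMTUD probing every 60s for all future associations. */
static int enable_plpmtud(int fd)
{
        struct sctp_probeinterval pi;

        memset(&pi, 0, sizeof(pi));
        pi.spi_assoc_id = SCTP_FUTURE_ASSOC;
        /* spi_address left as INADDR_ANY: no per-transport override */
        pi.spi_interval = 60000;        /* milliseconds; 0 disables probing */

        return setsockopt(fd, IPPROTO_SCTP, SCTP_PLPMTUD_PROBE_INTERVAL,
                          &pi, sizeof(pi));
}
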
@@ -4703,6 +4759,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
        case SCTP_REMOTE_UDP_ENCAPS_PORT:
                retval = sctp_setsockopt_encap_port(sk, kopt, optlen);
                break;
+       case SCTP_PLPMTUD_PROBE_INTERVAL:
+               retval = sctp_setsockopt_probe_interval(sk, kopt, optlen);
+               break;
        default:
                retval = -ENOPROTOOPT;
                break;
@@ -4989,6 +5048,7 @@ static int sctp_init_sock(struct sock *sk)
        atomic_set(&sp->pd_mode, 0);
        skb_queue_head_init(&sp->pd_lobby);
        sp->frag_interleave = 0;
+       sp->probe_interval = net->sctp.probe_interval;
 
        /* Create a per socket endpoint structure.  Even if we
         * change the data structure relationships, this may still
@@ -7905,6 +7965,66 @@ out:
        return 0;
 }
 
+static int sctp_getsockopt_probe_interval(struct sock *sk, int len,
+                                         char __user *optval,
+                                         int __user *optlen)
+{
+       struct sctp_probeinterval params;
+       struct sctp_association *asoc;
+       struct sctp_transport *t;
+       __u32 probe_interval;
+
+       if (len < sizeof(params))
+               return -EINVAL;
+
+       len = sizeof(params);
+       if (copy_from_user(&params, optval, len))
+               return -EFAULT;
+
+       /* If an address other than INADDR_ANY is specified, and
+        * no transport is found, then the request is invalid.
+        */
+       if (!sctp_is_any(sk, (union sctp_addr *)&params.spi_address)) {
+               t = sctp_addr_id2transport(sk, &params.spi_address,
+                                          params.spi_assoc_id);
+               if (!t) {
+                       pr_debug("%s: failed no transport\n", __func__);
+                       return -EINVAL;
+               }
+
+               probe_interval = jiffies_to_msecs(t->probe_interval);
+               goto out;
+       }
+
+       /* Get association, if assoc_id != SCTP_FUTURE_ASSOC and the
+        * socket is a one to many style socket, and an association
+        * was not found, then the id was invalid.
+        */
+       asoc = sctp_id2assoc(sk, params.spi_assoc_id);
+       if (!asoc && params.spi_assoc_id != SCTP_FUTURE_ASSOC &&
+           sctp_style(sk, UDP)) {
+               pr_debug("%s: failed no association\n", __func__);
+               return -EINVAL;
+       }
+
+       if (asoc) {
+               probe_interval = jiffies_to_msecs(asoc->probe_interval);
+               goto out;
+       }
+
+       probe_interval = sctp_sk(sk)->probe_interval;
+
+out:
+       params.spi_interval = probe_interval;
+       if (copy_to_user(optval, &params, len))
+               return -EFAULT;
+
+       if (put_user(len, optlen))
+               return -EFAULT;
+
+       return 0;
+}
+
 static int sctp_getsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, int __user *optlen)
 {
@@ -8128,6 +8248,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
        case SCTP_REMOTE_UDP_ENCAPS_PORT:
                retval = sctp_getsockopt_encap_port(sk, len, optval, optlen);
                break;
+       case SCTP_PLPMTUD_PROBE_INTERVAL:
+               retval = sctp_getsockopt_probe_interval(sk, len, optval, optlen);
+               break;
        default:
                retval = -ENOPROTOOPT;
                break;
index 55871b2..b46a416 100644 (file)
@@ -55,6 +55,8 @@ static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
                                   void *buffer, size_t *lenp, loff_t *ppos);
 static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
                             void *buffer, size_t *lenp, loff_t *ppos);
+static int proc_sctp_do_probe_interval(struct ctl_table *ctl, int write,
+                                      void *buffer, size_t *lenp, loff_t *ppos);
 
 static struct ctl_table sctp_table[] = {
        {
@@ -294,6 +296,13 @@ static struct ctl_table sctp_net_table[] = {
                .proc_handler   = proc_dointvec,
        },
        {
+               .procname       = "plpmtud_probe_interval",
+               .data           = &init_net.sctp.probe_interval,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_sctp_do_probe_interval,
+       },
+       {
                .procname       = "udp_port",
                .data           = &init_net.sctp.udp_port,
                .maxlen         = sizeof(int),
@@ -539,6 +548,32 @@ static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write,
        return ret;
 }
 
+static int proc_sctp_do_probe_interval(struct ctl_table *ctl, int write,
+                                      void *buffer, size_t *lenp, loff_t *ppos)
+{
+       struct net *net = current->nsproxy->net_ns;
+       struct ctl_table tbl;
+       int ret, new_value;
+
+       memset(&tbl, 0, sizeof(struct ctl_table));
+       tbl.maxlen = sizeof(unsigned int);
+
+       if (write)
+               tbl.data = &new_value;
+       else
+               tbl.data = &net->sctp.probe_interval;
+
+       ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
+       if (write && ret == 0) {
+               if (new_value && new_value < SCTP_PROBE_TIMER_MIN)
+                       return -EINVAL;
+
+               net->sctp.probe_interval = new_value;
+       }
+
+       return ret;
+}
+
 int sctp_sysctl_net_register(struct net *net)
 {
        struct ctl_table *table;
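
The per-netns default can also be set through procfs; a hedged sketch using the path registered above (value in milliseconds: 0 disables probing, anything else must be at least SCTP_PROBE_TIMER_MIN per the handler):

#include <stdio.h>

static int set_probe_interval_default(unsigned int ms)
{
        FILE *f = fopen("/proc/sys/net/sctp/plpmtud_probe_interval", "w");

        if (!f)
                return -1;
        fprintf(f, "%u\n", ms);
        return fclose(f);
}
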
index bf0ac46..5f23804 100644 (file)
@@ -75,6 +75,7 @@ static struct sctp_transport *sctp_transport_init(struct net *net,
        timer_setup(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, 0);
        timer_setup(&peer->hb_timer, sctp_generate_heartbeat_event, 0);
        timer_setup(&peer->reconf_timer, sctp_generate_reconf_event, 0);
+       timer_setup(&peer->probe_timer, sctp_generate_probe_event, 0);
        timer_setup(&peer->proto_unreach_timer,
                    sctp_generate_proto_unreach_event, 0);
 
@@ -131,6 +132,9 @@ void sctp_transport_free(struct sctp_transport *transport)
        if (del_timer(&transport->reconf_timer))
                sctp_transport_put(transport);
 
+       if (del_timer(&transport->probe_timer))
+               sctp_transport_put(transport);
+
        /* Delete the ICMP proto unreachable timer if it's active. */
        if (del_timer(&transport->proto_unreach_timer))
                sctp_transport_put(transport);
@@ -207,6 +211,15 @@ void sctp_transport_reset_reconf_timer(struct sctp_transport *transport)
                        sctp_transport_hold(transport);
 }
 
+void sctp_transport_reset_probe_timer(struct sctp_transport *transport)
+{
+       if (timer_pending(&transport->probe_timer))
+               return;
+       if (!mod_timer(&transport->probe_timer,
+                      jiffies + transport->probe_interval))
+               sctp_transport_hold(transport);
+}
+
 /* This transport has been assigned to an association.
  * Initialize fields from the association or from the sock itself.
  * Register the reference count in the association.
@@ -241,12 +254,143 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
                transport->pathmtu = sctp_dst_mtu(transport->dst);
        else
                transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
+
+       sctp_transport_pl_update(transport);
+}
+
+void sctp_transport_pl_send(struct sctp_transport *t)
+{
+       pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
+                __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
+
+       if (t->pl.probe_count < SCTP_MAX_PROBES) {
+               t->pl.probe_count++;
+               return;
+       }
+
+       if (t->pl.state == SCTP_PL_BASE) {
+               if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */
+                       t->pl.state = SCTP_PL_ERROR; /* Base -> Error */
+
+                       t->pl.pmtu = SCTP_MIN_PLPMTU;
+                       t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+                       sctp_assoc_sync_pmtu(t->asoc);
+               }
+       } else if (t->pl.state == SCTP_PL_SEARCH) {
+               if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */
+                       t->pl.state = SCTP_PL_BASE;  /* Search -> Base */
+                       t->pl.probe_size = SCTP_BASE_PLPMTU;
+                       t->pl.probe_high = 0;
+
+                       t->pl.pmtu = SCTP_BASE_PLPMTU;
+                       t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+                       sctp_assoc_sync_pmtu(t->asoc);
+               } else { /* Normal probe failure. */
+                       t->pl.probe_high = t->pl.probe_size;
+                       t->pl.probe_size = t->pl.pmtu;
+               }
+       } else if (t->pl.state == SCTP_PL_COMPLETE) {
+               if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */
+                       t->pl.state = SCTP_PL_BASE;  /* Search Complete -> Base */
+                       t->pl.probe_size = SCTP_BASE_PLPMTU;
+
+                       t->pl.pmtu = SCTP_BASE_PLPMTU;
+                       t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+                       sctp_assoc_sync_pmtu(t->asoc);
+               }
+       }
+       t->pl.probe_count = 1;
+}
+
+void sctp_transport_pl_recv(struct sctp_transport *t)
+{
+       pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
+                __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
+
+       t->pl.pmtu = t->pl.probe_size;
+       t->pl.probe_count = 0;
+       if (t->pl.state == SCTP_PL_BASE) {
+               t->pl.state = SCTP_PL_SEARCH; /* Base -> Search */
+               t->pl.probe_size += SCTP_PL_BIG_STEP;
+       } else if (t->pl.state == SCTP_PL_ERROR) {
+               t->pl.state = SCTP_PL_SEARCH; /* Error -> Search */
+
+               t->pl.pmtu = t->pl.probe_size;
+               t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+               sctp_assoc_sync_pmtu(t->asoc);
+               t->pl.probe_size += SCTP_PL_BIG_STEP;
+       } else if (t->pl.state == SCTP_PL_SEARCH) {
+               if (!t->pl.probe_high) {
+                       t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP,
+                                              SCTP_MAX_PLPMTU);
+                       return;
+               }
+               t->pl.probe_size += SCTP_PL_MIN_STEP;
+               if (t->pl.probe_size >= t->pl.probe_high) {
+                       t->pl.probe_high = 0;
+                       t->pl.raise_count = 0;
+                       t->pl.state = SCTP_PL_COMPLETE; /* Search -> Search Complete */
+
+                       t->pl.probe_size = t->pl.pmtu;
+                       t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+                       sctp_assoc_sync_pmtu(t->asoc);
+               }
+       } else if (t->pl.state == SCTP_PL_COMPLETE && ++t->pl.raise_count == 30) {
+               /* Raise probe_size again after 30 * interval in Search Complete */
+               t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
+               t->pl.probe_size += SCTP_PL_MIN_STEP;
+       }
+}
+
+static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu)
+{
+       pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, ptb: %d\n",
+                __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, pmtu);
+
+       if (pmtu < SCTP_MIN_PLPMTU || pmtu >= t->pl.probe_size)
+               return false;
+
+       if (t->pl.state == SCTP_PL_BASE) {
+               if (pmtu >= SCTP_MIN_PLPMTU && pmtu < SCTP_BASE_PLPMTU) {
+                       t->pl.state = SCTP_PL_ERROR; /* Base -> Error */
+
+                       t->pl.pmtu = SCTP_MIN_PLPMTU;
+                       t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+               }
+       } else if (t->pl.state == SCTP_PL_SEARCH) {
+               if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) {
+                       t->pl.state = SCTP_PL_BASE;  /* Search -> Base */
+                       t->pl.probe_size = SCTP_BASE_PLPMTU;
+                       t->pl.probe_count = 0;
+
+                       t->pl.probe_high = 0;
+                       t->pl.pmtu = SCTP_BASE_PLPMTU;
+                       t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+               } else if (pmtu > t->pl.pmtu && pmtu < t->pl.probe_size) {
+                       t->pl.probe_size = pmtu;
+                       t->pl.probe_count = 0;
+
+                       return false;
+               }
+       } else if (t->pl.state == SCTP_PL_COMPLETE) {
+               if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) {
+                       t->pl.state = SCTP_PL_BASE;  /* Complete -> Base */
+                       t->pl.probe_size = SCTP_BASE_PLPMTU;
+                       t->pl.probe_count = 0;
+
+                       t->pl.probe_high = 0;
+                       t->pl.pmtu = SCTP_BASE_PLPMTU;
+                       t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+               }
+       }
+
+       return true;
 }
 
 bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 {
-       struct dst_entry *dst = sctp_transport_dst_check(t);
        struct sock *sk = t->asoc->base.sk;
+       struct dst_entry *dst;
        bool change = true;
 
        if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
@@ -257,6 +401,10 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
        }
        pmtu = SCTP_TRUNC4(pmtu);
 
+       if (sctp_transport_pl_enabled(t))
+               return sctp_transport_pl_toobig(t, pmtu - sctp_transport_pl_hlen(t));
+
+       dst = sctp_transport_dst_check(t);
        if (dst) {
                struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family);
                union sctp_addr addr;
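
Taken together, sctp_transport_pl_send(), sctp_transport_pl_recv() and sctp_transport_pl_toobig() above implement the RFC 8899 (DPLPMTUD) probe state machine. A hedged summary of the transitions they encode; the enum is illustrative, the real state definitions live in the sctp headers:

enum plpmtud_state_sketch {
        PL_SKETCH_ERROR,        /* pmtu pinned at SCTP_MIN_PLPMTU; an acked
                                 * probe moves back to SEARCH */
        PL_SKETCH_BASE,         /* confirming SCTP_BASE_PLPMTU; an ack moves
                                 * to SEARCH, SCTP_MAX_PROBES failures move
                                 * to ERROR */
        PL_SKETCH_SEARCH,       /* probe_size grows by BIG/MIN steps; hitting
                                 * probe_high moves to COMPLETE, losing the
                                 * pmtu-sized probe falls back to BASE */
        PL_SKETCH_COMPLETE,     /* pmtu settled; after 30 intervals SEARCH is
                                 * re-entered, a black hole falls back to
                                 * BASE */
};
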
index 614013e..e80e34f 100644 (file)
@@ -393,17 +393,17 @@ int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb)
                        continue;
                if (!skip_serv) {
                        rc_srv = smc_nl_get_fback_details(skb, cb, k, is_srv);
-                       if (rc_srv && rc_srv != ENODATA)
+                       if (rc_srv && rc_srv != -ENODATA)
                                break;
                } else {
                        skip_serv = 0;
                }
                rc_clnt = smc_nl_get_fback_details(skb, cb, k, !is_srv);
-               if (rc_clnt && rc_clnt != ENODATA) {
+               if (rc_clnt && rc_clnt != -ENODATA) {
                        skip_serv = 1;
                        break;
                }
-               if (rc_clnt == ENODATA && rc_srv == ENODATA)
+               if (rc_clnt == -ENODATA && rc_srv == -ENODATA)
                        break;
        }
        mutex_unlock(&net->smc.mutex_fback_rsn);
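
The smc change above is purely a sign-convention fix: smc_nl_get_fback_details() returns negative errno values, so the old comparisons against positive ENODATA could never match. A minimal reminder sketch:

#include <errno.h>

static int query(void)
{
        return -ENODATA;        /* kernel-internal convention: negative errno */
}

static int caller(void)
{
        int rc = query();

        if (rc && rc != -ENODATA)       /* compare against the negative value */
                return rc;
        return 0;
}
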
index 075c4f4..289025c 100644 (file)
@@ -154,6 +154,9 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
                goto out_err;
        }
 
+       if (sk->sk_state == SMC_INIT)
+               return -ENOTCONN;
+
        if (len > conn->sndbuf_desc->len)
                SMC_STAT_RMB_TX_SIZE_SMALL(smc, !conn->lnk);
 
@@ -164,8 +167,6 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
                SMC_STAT_INC(smc, urg_data_cnt);
 
        while (msg_data_left(msg)) {
-               if (sk->sk_state == SMC_INIT)
-                       return -ENOTCONN;
                if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
                    (smc->sk.sk_err == ECONNABORTED) ||
                    conn->killed)
index 27e3e7d..bd9233d 100644 (file)
@@ -165,6 +165,54 @@ static const struct file_operations socket_file_ops = {
        .show_fdinfo =  sock_show_fdinfo,
 };
 
+static const char * const pf_family_names[] = {
+       [PF_UNSPEC]     = "PF_UNSPEC",
+       [PF_UNIX]       = "PF_UNIX/PF_LOCAL",
+       [PF_INET]       = "PF_INET",
+       [PF_AX25]       = "PF_AX25",
+       [PF_IPX]        = "PF_IPX",
+       [PF_APPLETALK]  = "PF_APPLETALK",
+       [PF_NETROM]     = "PF_NETROM",
+       [PF_BRIDGE]     = "PF_BRIDGE",
+       [PF_ATMPVC]     = "PF_ATMPVC",
+       [PF_X25]        = "PF_X25",
+       [PF_INET6]      = "PF_INET6",
+       [PF_ROSE]       = "PF_ROSE",
+       [PF_DECnet]     = "PF_DECnet",
+       [PF_NETBEUI]    = "PF_NETBEUI",
+       [PF_SECURITY]   = "PF_SECURITY",
+       [PF_KEY]        = "PF_KEY",
+       [PF_NETLINK]    = "PF_NETLINK/PF_ROUTE",
+       [PF_PACKET]     = "PF_PACKET",
+       [PF_ASH]        = "PF_ASH",
+       [PF_ECONET]     = "PF_ECONET",
+       [PF_ATMSVC]     = "PF_ATMSVC",
+       [PF_RDS]        = "PF_RDS",
+       [PF_SNA]        = "PF_SNA",
+       [PF_IRDA]       = "PF_IRDA",
+       [PF_PPPOX]      = "PF_PPPOX",
+       [PF_WANPIPE]    = "PF_WANPIPE",
+       [PF_LLC]        = "PF_LLC",
+       [PF_IB]         = "PF_IB",
+       [PF_MPLS]       = "PF_MPLS",
+       [PF_CAN]        = "PF_CAN",
+       [PF_TIPC]       = "PF_TIPC",
+       [PF_BLUETOOTH]  = "PF_BLUETOOTH",
+       [PF_IUCV]       = "PF_IUCV",
+       [PF_RXRPC]      = "PF_RXRPC",
+       [PF_ISDN]       = "PF_ISDN",
+       [PF_PHONET]     = "PF_PHONET",
+       [PF_IEEE802154] = "PF_IEEE802154",
+       [PF_CAIF]       = "PF_CAIF",
+       [PF_ALG]        = "PF_ALG",
+       [PF_NFC]        = "PF_NFC",
+       [PF_VSOCK]      = "PF_VSOCK",
+       [PF_KCM]        = "PF_KCM",
+       [PF_QIPCRTR]    = "PF_QIPCRTR",
+       [PF_SMC]        = "PF_SMC",
+       [PF_XDP]        = "PF_XDP",
+};
+
 /*
  *     The protocol list. Each protocol is registered in here.
  */
@@ -1072,19 +1120,6 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
  *     what to do with it - that's up to the protocol still.
  */
 
-/**
- *     get_net_ns - increment the refcount of the network namespace
- *     @ns: common namespace (net)
- *
- *     Returns the net's common namespace.
- */
-
-struct ns_common *get_net_ns(struct ns_common *ns)
-{
-       return &get_net(container_of(ns, struct net, ns))->ns;
-}
-EXPORT_SYMBOL_GPL(get_net_ns);
-
 static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 {
        struct socket *sock;
@@ -2988,7 +3023,7 @@ int sock_register(const struct net_proto_family *ops)
        }
        spin_unlock(&net_family_lock);
 
-       pr_info("NET: Registered protocol family %d\n", ops->family);
+       pr_info("NET: Registered %s protocol family\n", pf_family_names[ops->family]);
        return err;
 }
 EXPORT_SYMBOL(sock_register);
@@ -3016,7 +3051,7 @@ void sock_unregister(int family)
 
        synchronize_rcu();
 
-       pr_info("NET: Unregistered protocol family %d\n", family);
+       pr_info("NET: Unregistered %s protocol family\n", pf_family_names[family]);
 }
 EXPORT_SYMBOL(sock_unregister);
 
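
One hedged caveat about the lookup above: pf_family_names[] is sparse by construction (designated initializers leave unnamed slots NULL), so a family added later without an entry would hand pr_info() a NULL string. A defensive accessor sketch, illustrative rather than part of the patch:

static const char *pf_name(int family)
{
        if (family < 0 || family >= (int)ARRAY_SIZE(pf_family_names) ||
            !pf_family_names[family])
                return "PF_(unknown)";
        return pf_family_names[family];
}
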
index f555d33..42623d6 100644 (file)
@@ -1677,13 +1677,6 @@ call_reserveresult(struct rpc_task *task)
                return;
        }
 
-       /*
-        * Even though there was an error, we may have acquired
-        * a request slot somehow.  Make sure not to leak it.
-        */
-       if (task->tk_rqstp)
-               xprt_release(task);
-
        switch (status) {
        case -ENOMEM:
                rpc_delay(task, HZ >> 2);
index e5b5a96..3509a7f 100644 (file)
@@ -70,6 +70,7 @@
 static void     xprt_init(struct rpc_xprt *xprt, struct net *net);
 static __be32  xprt_alloc_xid(struct rpc_xprt *xprt);
 static void     xprt_destroy(struct rpc_xprt *xprt);
+static void     xprt_request_init(struct rpc_task *task);
 
 static DEFINE_SPINLOCK(xprt_list_lock);
 static LIST_HEAD(xprt_list);
@@ -1606,17 +1607,40 @@ xprt_transmit(struct rpc_task *task)
        spin_unlock(&xprt->queue_lock);
 }
 
-static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
+static void xprt_complete_request_init(struct rpc_task *task)
+{
+       if (task->tk_rqstp)
+               xprt_request_init(task);
+}
+
+void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
 {
        set_bit(XPRT_CONGESTED, &xprt->state);
-       rpc_sleep_on(&xprt->backlog, task, NULL);
+       rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
+}
+EXPORT_SYMBOL_GPL(xprt_add_backlog);
+
+static bool __xprt_set_rq(struct rpc_task *task, void *data)
+{
+       struct rpc_rqst *req = data;
+
+       if (task->tk_rqstp == NULL) {
+               memset(req, 0, sizeof(*req));   /* mark unused */
+               task->tk_rqstp = req;
+               return true;
+       }
+       return false;
 }
 
-static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
+bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
-       if (rpc_wake_up_next(&xprt->backlog) == NULL)
+       if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
                clear_bit(XPRT_CONGESTED, &xprt->state);
+               return false;
+       }
+       return true;
 }
+EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);
 
 static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
 {
@@ -1626,7 +1650,7 @@ static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task
                goto out;
        spin_lock(&xprt->reserve_lock);
        if (test_bit(XPRT_CONGESTED, &xprt->state)) {
-               rpc_sleep_on(&xprt->backlog, task, NULL);
+               xprt_add_backlog(xprt, task);
                ret = true;
        }
        spin_unlock(&xprt->reserve_lock);
@@ -1703,11 +1727,11 @@ EXPORT_SYMBOL_GPL(xprt_alloc_slot);
 void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
        spin_lock(&xprt->reserve_lock);
-       if (!xprt_dynamic_free_slot(xprt, req)) {
+       if (!xprt_wake_up_backlog(xprt, req) &&
+           !xprt_dynamic_free_slot(xprt, req)) {
                memset(req, 0, sizeof(*req));   /* mark unused */
                list_add(&req->rq_list, &xprt->free);
        }
-       xprt_wake_up_backlog(xprt);
        spin_unlock(&xprt->reserve_lock);
 }
 EXPORT_SYMBOL_GPL(xprt_free_slot);
@@ -1894,10 +1918,10 @@ void xprt_release(struct rpc_task *task)
        xdr_free_bvec(&req->rq_snd_buf);
        if (req->rq_cred != NULL)
                put_rpccred(req->rq_cred);
-       task->tk_rqstp = NULL;
        if (req->rq_release_snd_buf)
                req->rq_release_snd_buf(req);
 
+       task->tk_rqstp = NULL;
        if (likely(!bc_prealloc(req)))
                xprt->ops->free_slot(xprt, req);
        else
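
Taken together, the xprt.c hunks turn slot recycling from "free the slot, then
wake a task to re-allocate it" into a direct handoff: xprt_wake_up_backlog()
offers the dying request to rpc_wake_up_first(), whose __xprt_set_rq() action
installs it as the waiter's tk_rqstp before that task even runs. A simplified
view of the resulting free path, assuming the reserve_lock held in
xprt_free_slot() above:

        /* sketch: offer the slot to a queued task first, the pool second */
        if (xprt_wake_up_backlog(xprt, req))
                return;                         /* a waiting task now owns req */
        if (!xprt_dynamic_free_slot(xprt, req)) {
                memset(req, 0, sizeof(*req));   /* mark unused */
                list_add(&req->rq_list, &xprt->free);
        }

This pairs with the call_reserveresult() hunk above: a task that wakes up
holding a request now legitimately keeps it, rather than treating it as a slot
leaked on the error path.
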
index 649f7d8..c335c13 100644 (file)
@@ -628,8 +628,9 @@ out_mapping_err:
        return false;
 }
 
-/* The tail iovec might not reside in the same page as the
- * head iovec.
+/* The tail iovec may include an XDR pad for the page list,
+ * as well as additional content, and may not reside in the
+ * same page as the head iovec.
  */
 static bool rpcrdma_prepare_tail_iov(struct rpcrdma_req *req,
                                     struct xdr_buf *xdr,
@@ -747,19 +748,27 @@ static bool rpcrdma_prepare_readch(struct rpcrdma_xprt *r_xprt,
                                   struct rpcrdma_req *req,
                                   struct xdr_buf *xdr)
 {
-       struct kvec *tail = &xdr->tail[0];
-
        if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
                return false;
 
-       /* If there is a Read chunk, the page list is handled
+       /* If there is a Read chunk, the page list is being handled
         * via explicit RDMA, and thus is skipped here.
         */
 
-       if (tail->iov_len) {
-               if (!rpcrdma_prepare_tail_iov(req, xdr,
-                                             offset_in_page(tail->iov_base),
-                                             tail->iov_len))
+       /* Do not include the tail if it is only an XDR pad */
+       if (xdr->tail[0].iov_len > 3) {
+               unsigned int page_base, len;
+
+               /* If the content in the page list is an odd length,
+                * xdr_write_pages() adds a pad at the beginning of
+                * the tail iovec. Force the tail's non-pad content to
+                * land at the next XDR position in the Send message.
+                */
+               page_base = offset_in_page(xdr->tail[0].iov_base);
+               len = xdr->tail[0].iov_len;
+               page_base += len & 3;
+               len -= len & 3;
+               if (!rpcrdma_prepare_tail_iov(req, xdr, page_base, len))
                        return false;
                kref_get(&req->rl_kref);
        }
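
A worked example of the pad arithmetic above, under the same assumption the new
comment states (the non-pad tail content is itself XDR-aligned):

        /* page list carries 1021 bytes -> xdr_write_pages() inserts a
         * 3-byte pad at the start of the tail; an 8-byte trailer follows:
         *
         *   tail->iov_len = 3 + 8 = 11    passes the "> 3" test
         *   len & 3       = 3             size of the leading pad
         *   page_base    += 3             the Send SGE starts at the trailer
         *   len          -= 3             8 bytes land on the next XDR boundary
         */

A tail of 3 bytes or fewer can only be pad, which is why the "> 3" test skips
it entirely.
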
index 0995359..19a49d2 100644 (file)
@@ -520,9 +520,8 @@ xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
        return;
 
 out_sleep:
-       set_bit(XPRT_CONGESTED, &xprt->state);
-       rpc_sleep_on(&xprt->backlog, task, NULL);
        task->tk_status = -EAGAIN;
+       xprt_add_backlog(xprt, task);
 }
 
 /**
@@ -537,10 +536,11 @@ xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
        struct rpcrdma_xprt *r_xprt =
                container_of(xprt, struct rpcrdma_xprt, rx_xprt);
 
-       memset(rqst, 0, sizeof(*rqst));
-       rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
-       if (unlikely(!rpc_wake_up_next(&xprt->backlog)))
-               clear_bit(XPRT_CONGESTED, &xprt->state);
+       rpcrdma_reply_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
+       if (!xprt_wake_up_backlog(xprt, rqst)) {
+               memset(rqst, 0, sizeof(*rqst));
+               rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
+       }
 }
 
 static bool rpcrdma_check_regbuf(struct rpcrdma_xprt *r_xprt,
index 1e965a3..649c235 100644 (file)
@@ -1201,6 +1201,20 @@ rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
 }
 
 /**
+ * rpcrdma_reply_put - Put reply buffers back into pool
+ * @buffers: buffer pool
+ * @req: object to return
+ *
+ */
+void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
+{
+       if (req->rl_reply) {
+               rpcrdma_rep_put(buffers, req->rl_reply);
+               req->rl_reply = NULL;
+       }
+}
+
+/**
  * rpcrdma_buffer_get - Get a request buffer
  * @buffers: Buffer pool from which to obtain a buffer
  *
@@ -1228,9 +1242,7 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
  */
 void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
 {
-       if (req->rl_reply)
-               rpcrdma_rep_put(buffers, req->rl_reply);
-       req->rl_reply = NULL;
+       rpcrdma_reply_put(buffers, req);
 
        spin_lock(&buffers->rb_lock);
        list_add(&req->rl_list, &buffers->rb_send_bufs);
index 436ad73..5d231d9 100644 (file)
@@ -479,6 +479,7 @@ struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
 void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers,
                        struct rpcrdma_req *req);
 void rpcrdma_rep_put(struct rpcrdma_buffer *buf, struct rpcrdma_rep *rep);
+void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req);
 
 bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size,
                            gfp_t flags);
index 47aa47a..316d049 100644 (file)
@@ -1010,6 +1010,8 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
                        kernel_sock_shutdown(transport->sock, SHUT_RDWR);
                return -ENOTCONN;
        }
+       if (!transport->inet)
+               return -ENOTCONN;
 
        xs_pktdump("packet data:",
                                req->rq_svec->iov_base,
index 89a36db..070698d 100644 (file)
@@ -381,19 +381,20 @@ EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
 static int __switchdev_handle_port_obj_add(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
-                       int (*add_cb)(struct net_device *dev,
+                       int (*add_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_obj *obj,
                                      struct netlink_ext_ack *extack))
 {
+       struct switchdev_notifier_info *info = &port_obj_info->info;
        struct netlink_ext_ack *extack;
        struct net_device *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;
 
-       extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
+       extack = switchdev_notifier_info_to_extack(info);
 
        if (check_cb(dev)) {
-               err = add_cb(dev, port_obj_info->obj, extack);
+               err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
                if (err != -EOPNOTSUPP)
                        port_obj_info->handled = true;
                return err;
@@ -422,7 +423,7 @@ static int __switchdev_handle_port_obj_add(struct net_device *dev,
 int switchdev_handle_port_obj_add(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
-                       int (*add_cb)(struct net_device *dev,
+                       int (*add_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_obj *obj,
                                      struct netlink_ext_ack *extack))
 {
@@ -439,15 +440,16 @@ EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
 static int __switchdev_handle_port_obj_del(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
-                       int (*del_cb)(struct net_device *dev,
+                       int (*del_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_obj *obj))
 {
+       struct switchdev_notifier_info *info = &port_obj_info->info;
        struct net_device *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;
 
        if (check_cb(dev)) {
-               err = del_cb(dev, port_obj_info->obj);
+               err = del_cb(dev, info->ctx, port_obj_info->obj);
                if (err != -EOPNOTSUPP)
                        port_obj_info->handled = true;
                return err;
@@ -476,7 +478,7 @@ static int __switchdev_handle_port_obj_del(struct net_device *dev,
 int switchdev_handle_port_obj_del(struct net_device *dev,
                        struct switchdev_notifier_port_obj_info *port_obj_info,
                        bool (*check_cb)(const struct net_device *dev),
-                       int (*del_cb)(struct net_device *dev,
+                       int (*del_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_obj *obj))
 {
        int err;
@@ -492,19 +494,20 @@ EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
 static int __switchdev_handle_port_attr_set(struct net_device *dev,
                        struct switchdev_notifier_port_attr_info *port_attr_info,
                        bool (*check_cb)(const struct net_device *dev),
-                       int (*set_cb)(struct net_device *dev,
+                       int (*set_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_attr *attr,
                                      struct netlink_ext_ack *extack))
 {
+       struct switchdev_notifier_info *info = &port_attr_info->info;
        struct netlink_ext_ack *extack;
        struct net_device *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;
 
-       extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
+       extack = switchdev_notifier_info_to_extack(info);
 
        if (check_cb(dev)) {
-               err = set_cb(dev, port_attr_info->attr, extack);
+               err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
                if (err != -EOPNOTSUPP)
                        port_attr_info->handled = true;
                return err;
@@ -533,7 +536,7 @@ static int __switchdev_handle_port_attr_set(struct net_device *dev,
 int switchdev_handle_port_attr_set(struct net_device *dev,
                        struct switchdev_notifier_port_attr_info *port_attr_info,
                        bool (*check_cb)(const struct net_device *dev),
-                       int (*set_cb)(struct net_device *dev,
+                       int (*set_cb)(struct net_device *dev, const void *ctx,
                                      const struct switchdev_attr *attr,
                                      struct netlink_ext_ack *extack))
 {
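
All three dispatchers now thread the notifier's info->ctx through to the
driver, so every switchdev callback gains a "const void *ctx" argument. A
hypothetical callback matching the new add_cb shape; the names here are
illustrative, not from this series:

        static int example_port_obj_add(struct net_device *dev, const void *ctx,
                                        const struct switchdev_obj *obj,
                                        struct netlink_ext_ack *extack)
        {
                struct example_port *port = netdev_priv(dev);

                /* ctx identifies on whose behalf the notification was sent;
                 * a driver may use it to filter replays meant for another
                 * port (illustrative policy, not mandated by the core). */
                if (ctx && ctx != port)
                        return 0;
                return example_obj_add(port, obj, extack);
        }
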
index d4beca8..593846d 100644 (file)
@@ -699,7 +699,7 @@ int tipc_bcast_init(struct net *net)
        spin_lock_init(&tipc_net(net)->bclock);
 
        if (!tipc_link_bc_create(net, 0, 0, NULL,
-                                FB_MTU,
+                                one_page_mtu,
                                 BCLINK_WIN_DEFAULT,
                                 BCLINK_WIN_DEFAULT,
                                 0,
index ce6ab54..5c9fd47 100644 (file)
 #include "name_table.h"
 #include "crypto.h"
 
+#define BUF_ALIGN(x) ALIGN(x, 4)
 #define MAX_FORWARD_SIZE 1024
 #ifdef CONFIG_TIPC_CRYPTO
 #define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
-#define BUF_TAILROOM (TIPC_AES_GCM_TAG_SIZE)
+#define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE)
 #else
 #define BUF_HEADROOM (LL_MAX_HEADER + 48)
-#define BUF_TAILROOM 16
+#define BUF_OVERHEAD BUF_HEADROOM
 #endif
 
-static unsigned int align(unsigned int i)
-{
-       return (i + 3) & ~3u;
-}
+const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) -
+                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 /**
  * tipc_buf_acquire - creates a TIPC message buffer
@@ -69,13 +68,8 @@ static unsigned int align(unsigned int i)
 struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
 {
        struct sk_buff *skb;
-#ifdef CONFIG_TIPC_CRYPTO
-       unsigned int buf_size = (BUF_HEADROOM + size + BUF_TAILROOM + 3) & ~3u;
-#else
-       unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
-#endif
 
-       skb = alloc_skb_fclone(buf_size, gfp);
+       skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp);
        if (skb) {
                skb_reserve(skb, BUF_HEADROOM);
                skb_put(skb, size);
@@ -395,7 +389,8 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
                if (unlikely(!skb)) {
                        if (pktmax != MAX_MSG_SIZE)
                                return -ENOMEM;
-                       rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
+                       rc = tipc_msg_build(mhdr, m, offset, dsz,
+                                           one_page_mtu, list);
                        if (rc != dsz)
                                return rc;
                        if (tipc_msg_assemble(list))
@@ -490,7 +485,7 @@ static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
 
        msz = msg_size(msg);
        bsz = msg_size(bmsg);
-       offset = align(bsz);
+       offset = BUF_ALIGN(bsz);
        pad = offset - bsz;
 
        if (unlikely(skb_tailroom(bskb) < (pad + msz)))
@@ -547,7 +542,7 @@ bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
 
        /* Make a new bundle of the two messages if possible */
        tsz = msg_size(buf_msg(tskb));
-       if (unlikely(mss < align(INT_H_SIZE + tsz) + msg_size(msg)))
+       if (unlikely(mss < BUF_ALIGN(INT_H_SIZE + tsz) + msg_size(msg)))
                return true;
        if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
                                      GFP_ATOMIC)))
@@ -606,7 +601,7 @@ bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
        if (unlikely(!tipc_msg_validate(iskb)))
                goto none;
 
-       *pos += align(imsz);
+       *pos += BUF_ALIGN(imsz);
        return true;
 none:
        kfree_skb(skb);
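
The msg.c hunks retire the hard-coded FB_MTU fallback in favour of
one_page_mtu, sized so the fallback buffer, its headroom (plus the AES-GCM tag
with TIPC crypto enabled), and the skb_shared_info all fit in one page. The
shape of the computation, with the concrete values being config-dependent:

        /* one_page_mtu = PAGE_SIZE
         *              - SKB_DATA_ALIGN(BUF_OVERHEAD)        headroom (+ tag)
         *              - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
         *
         * so alloc_skb_fclone(BUF_OVERHEAD + one_page_mtu, gfp) in
         * tipc_buf_acquire() still fits a single page's worth of skb data.
         */
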
index 5d64596..64ae4c4 100644 (file)
@@ -99,9 +99,10 @@ struct plist;
 #define MAX_H_SIZE                60   /* Largest possible TIPC header size */
 
 #define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
-#define FB_MTU                  3744
 #define TIPC_MEDIA_INFO_OFFSET 5
 
+extern const int one_page_mtu;
+
 struct tipc_skb_cb {
        union {
                struct {
index 4d4f24c..58c2f31 100644 (file)
@@ -262,6 +262,14 @@ static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
        sk_add_node(sk, list);
 }
 
+static void __unix_set_addr(struct sock *sk, struct unix_address *addr,
+                           unsigned hash)
+{
+       __unix_remove_socket(sk);
+       smp_store_release(&unix_sk(sk)->addr, addr);
+       __unix_insert_socket(&unix_socket_table[hash], sk);
+}
+
 static inline void unix_remove_socket(struct sock *sk)
 {
        spin_lock(&unix_table_lock);
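
__unix_set_addr() centralizes the rehash-and-publish sequence: callers hold
unix_table_lock for the hash-chain move, while the address itself is published
with smp_store_release() so lockless readers observe a fully initialized
unix_address. A sketch of the matching reader side (upstream's unix_copy_addr()
reads the pointer essentially this way):

        static void copy_addr_sketch(struct msghdr *msg, struct sock *sk)
        {
                /* pairs with the release store in __unix_set_addr() */
                struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);

                if (addr) {
                        msg->msg_namelen = addr->len;
                        memcpy(msg->msg_name, addr->name, addr->len);
                }
        }
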
@@ -278,11 +286,11 @@ static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
 
 static struct sock *__unix_find_socket_byname(struct net *net,
                                              struct sockaddr_un *sunname,
-                                             int len, int type, unsigned int hash)
+                                             int len, unsigned int hash)
 {
        struct sock *s;
 
-       sk_for_each(s, &unix_socket_table[hash ^ type]) {
+       sk_for_each(s, &unix_socket_table[hash]) {
                struct unix_sock *u = unix_sk(s);
 
                if (!net_eq(sock_net(s), net))
@@ -297,13 +305,12 @@ static struct sock *__unix_find_socket_byname(struct net *net,
 
 static inline struct sock *unix_find_socket_byname(struct net *net,
                                                   struct sockaddr_un *sunname,
-                                                  int len, int type,
-                                                  unsigned int hash)
+                                                  int len, unsigned int hash)
 {
        struct sock *s;
 
        spin_lock(&unix_table_lock);
-       s = __unix_find_socket_byname(net, sunname, len, type, hash);
+       s = __unix_find_socket_byname(net, sunname, len, hash);
        if (s)
                sock_hold(s);
        spin_unlock(&unix_table_lock);
@@ -535,12 +542,14 @@ static void unix_release_sock(struct sock *sk, int embrion)
        u->path.mnt = NULL;
        state = sk->sk_state;
        sk->sk_state = TCP_CLOSE;
+
+       skpair = unix_peer(sk);
+       unix_peer(sk) = NULL;
+
        unix_state_unlock(sk);
 
        wake_up_interruptible_all(&u->peer_wait);
 
-       skpair = unix_peer(sk);
-
        if (skpair != NULL) {
                if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
                        unix_state_lock(skpair);
@@ -555,7 +564,6 @@ static void unix_release_sock(struct sock *sk, int embrion)
 
                unix_dgram_peer_wake_disconnect(sk, skpair);
                sock_put(skpair); /* It may now die */
-               unix_peer(sk) = NULL;
        }
 
        /* Try to flush out this socket. Throw out buffers at least */
@@ -890,12 +898,12 @@ static int unix_autobind(struct socket *sock)
 retry:
        addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
        addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
+       addr->hash ^= sk->sk_type;
 
        spin_lock(&unix_table_lock);
        ordernum = (ordernum+1)&0xFFFFF;
 
-       if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
-                                     addr->hash)) {
+       if (__unix_find_socket_byname(net, addr->name, addr->len, addr->hash)) {
                spin_unlock(&unix_table_lock);
                /*
                 * __unix_find_socket_byname() may take long time if many names
@@ -910,11 +918,8 @@ retry:
                }
                goto retry;
        }
-       addr->hash ^= sk->sk_type;
 
-       __unix_remove_socket(sk);
-       smp_store_release(&u->addr, addr);
-       __unix_insert_socket(&unix_socket_table[addr->hash], sk);
+       __unix_set_addr(sk, addr, addr->hash);
        spin_unlock(&unix_table_lock);
        err = 0;
 
@@ -959,7 +964,7 @@ static struct sock *unix_find_other(struct net *net,
                }
        } else {
                err = -ECONNREFUSED;
-               u = unix_find_socket_byname(net, sunname, len, type, hash);
+               u = unix_find_socket_byname(net, sunname, len, type ^ hash);
                if (u) {
                        struct dentry *dentry;
                        dentry = unix_sk(u)->path.dentry;
@@ -977,125 +982,125 @@ fail:
        return NULL;
 }
 
-static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
+static int unix_bind_bsd(struct sock *sk, struct unix_address *addr)
 {
+       struct unix_sock *u = unix_sk(sk);
+       umode_t mode = S_IFSOCK |
+              (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
+       struct user_namespace *ns; // barf...
+       struct path parent;
        struct dentry *dentry;
-       struct path path;
-       int err = 0;
+       unsigned int hash;
+       int err;
+
        /*
         * Get the parent directory, calculate the hash for last
         * component.
         */
-       dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
-       err = PTR_ERR(dentry);
+       dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
        if (IS_ERR(dentry))
-               return err;
+               return PTR_ERR(dentry);
+       ns = mnt_user_ns(parent.mnt);
 
        /*
         * All right, let's create it.
         */
-       err = security_path_mknod(&path, dentry, mode, 0);
-       if (!err) {
-               err = vfs_mknod(mnt_user_ns(path.mnt), d_inode(path.dentry),
-                               dentry, mode, 0);
-               if (!err) {
-                       res->mnt = mntget(path.mnt);
-                       res->dentry = dget(dentry);
-               }
-       }
-       done_path_create(&path, dentry);
+       err = security_path_mknod(&parent, dentry, mode, 0);
+       if (!err)
+               err = vfs_mknod(ns, d_inode(parent.dentry), dentry, mode, 0);
+       if (err)
+               goto out;
+       err = mutex_lock_interruptible(&u->bindlock);
+       if (err)
+               goto out_unlink;
+       if (u->addr)
+               goto out_unlock;
+
+       addr->hash = UNIX_HASH_SIZE;
+       hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+       spin_lock(&unix_table_lock);
+       u->path.mnt = mntget(parent.mnt);
+       u->path.dentry = dget(dentry);
+       __unix_set_addr(sk, addr, hash);
+       spin_unlock(&unix_table_lock);
+       mutex_unlock(&u->bindlock);
+       done_path_create(&parent, dentry);
+       return 0;
+
+out_unlock:
+       mutex_unlock(&u->bindlock);
+       err = -EINVAL;
+out_unlink:
+       /* failed after successful mknod?  unlink what we'd created... */
+       vfs_unlink(ns, d_inode(parent.dentry), dentry, NULL);
+out:
+       done_path_create(&parent, dentry);
        return err;
 }
 
+static int unix_bind_abstract(struct sock *sk, struct unix_address *addr)
+{
+       struct unix_sock *u = unix_sk(sk);
+       int err;
+
+       err = mutex_lock_interruptible(&u->bindlock);
+       if (err)
+               return err;
+
+       if (u->addr) {
+               mutex_unlock(&u->bindlock);
+               return -EINVAL;
+       }
+
+       spin_lock(&unix_table_lock);
+       if (__unix_find_socket_byname(sock_net(sk), addr->name, addr->len,
+                                     addr->hash)) {
+               spin_unlock(&unix_table_lock);
+               mutex_unlock(&u->bindlock);
+               return -EADDRINUSE;
+       }
+       __unix_set_addr(sk, addr, addr->hash);
+       spin_unlock(&unix_table_lock);
+       mutex_unlock(&u->bindlock);
+       return 0;
+}
+
 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 {
        struct sock *sk = sock->sk;
-       struct net *net = sock_net(sk);
-       struct unix_sock *u = unix_sk(sk);
        struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
        char *sun_path = sunaddr->sun_path;
        int err;
        unsigned int hash;
        struct unix_address *addr;
-       struct hlist_head *list;
-       struct path path = { };
 
-       err = -EINVAL;
        if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
            sunaddr->sun_family != AF_UNIX)
-               goto out;
+               return -EINVAL;
 
-       if (addr_len == sizeof(short)) {
-               err = unix_autobind(sock);
-               goto out;
-       }
+       if (addr_len == sizeof(short))
+               return unix_autobind(sock);
 
        err = unix_mkname(sunaddr, addr_len, &hash);
        if (err < 0)
-               goto out;
+               return err;
        addr_len = err;
-
-       if (sun_path[0]) {
-               umode_t mode = S_IFSOCK |
-                      (SOCK_INODE(sock)->i_mode & ~current_umask());
-               err = unix_mknod(sun_path, mode, &path);
-               if (err) {
-                       if (err == -EEXIST)
-                               err = -EADDRINUSE;
-                       goto out;
-               }
-       }
-
-       err = mutex_lock_interruptible(&u->bindlock);
-       if (err)
-               goto out_put;
-
-       err = -EINVAL;
-       if (u->addr)
-               goto out_up;
-
-       err = -ENOMEM;
        addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
        if (!addr)
-               goto out_up;
+               return -ENOMEM;
 
        memcpy(addr->name, sunaddr, addr_len);
        addr->len = addr_len;
        addr->hash = hash ^ sk->sk_type;
        refcount_set(&addr->refcnt, 1);
 
-       if (sun_path[0]) {
-               addr->hash = UNIX_HASH_SIZE;
-               hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
-               spin_lock(&unix_table_lock);
-               u->path = path;
-               list = &unix_socket_table[hash];
-       } else {
-               spin_lock(&unix_table_lock);
-               err = -EADDRINUSE;
-               if (__unix_find_socket_byname(net, sunaddr, addr_len,
-                                             sk->sk_type, hash)) {
-                       unix_release_addr(addr);
-                       goto out_unlock;
-               }
-
-               list = &unix_socket_table[addr->hash];
-       }
-
-       err = 0;
-       __unix_remove_socket(sk);
-       smp_store_release(&u->addr, addr);
-       __unix_insert_socket(list, sk);
-
-out_unlock:
-       spin_unlock(&unix_table_lock);
-out_up:
-       mutex_unlock(&u->bindlock);
-out_put:
+       if (sun_path[0])
+               err = unix_bind_bsd(sk, addr);
+       else
+               err = unix_bind_abstract(sk, addr);
        if (err)
-               path_put(&path);
-out:
-       return err;
+               unix_release_addr(addr);
+       return err == -EEXIST ? -EADDRINUSE : err;
 }
 
 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
index 67954af..21ccf45 100644 (file)
@@ -860,7 +860,7 @@ s64 vsock_stream_has_data(struct vsock_sock *vsk)
 }
 EXPORT_SYMBOL_GPL(vsock_stream_has_data);
 
-static s64 vsock_has_data(struct vsock_sock *vsk)
+static s64 vsock_connectible_has_data(struct vsock_sock *vsk)
 {
        struct sock *sk = sk_vsock(vsk);
 
@@ -1866,10 +1866,11 @@ out:
        return err;
 }
 
-static int vsock_wait_data(struct sock *sk, struct wait_queue_entry *wait,
-                          long timeout,
-                          struct vsock_transport_recv_notify_data *recv_data,
-                          size_t target)
+static int vsock_connectible_wait_data(struct sock *sk,
+                                      struct wait_queue_entry *wait,
+                                      long timeout,
+                                      struct vsock_transport_recv_notify_data *recv_data,
+                                      size_t target)
 {
        const struct vsock_transport *transport;
        struct vsock_sock *vsk;
@@ -1880,7 +1881,7 @@ static int vsock_wait_data(struct sock *sk, struct wait_queue_entry *wait,
        err = 0;
        transport = vsk->transport;
 
-       while ((data = vsock_has_data(vsk)) == 0) {
+       while ((data = vsock_connectible_has_data(vsk)) == 0) {
                prepare_to_wait(sk_sleep(sk), wait, TASK_INTERRUPTIBLE);
 
                if (sk->sk_err != 0 ||
@@ -1967,7 +1968,8 @@ static int __vsock_stream_recvmsg(struct sock *sk, struct msghdr *msg,
        while (1) {
                ssize_t read;
 
-               err = vsock_wait_data(sk, &wait, timeout, &recv_data, target);
+               err = vsock_connectible_wait_data(sk, &wait, timeout,
+                                                 &recv_data, target);
                if (err <= 0)
                        break;
 
@@ -2022,7 +2024,7 @@ static int __vsock_seqpacket_recvmsg(struct sock *sk, struct msghdr *msg,
 
        timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 
-       err = vsock_wait_data(sk, &wait, timeout, NULL, 0);
+       err = vsock_connectible_wait_data(sk, &wait, timeout, NULL, 0);
        if (err <= 0)
                goto out;
 
index e73ce65..ed1664e 100644 (file)
@@ -498,9 +498,11 @@ static bool virtio_transport_seqpacket_allow(u32 remote_cid)
        struct virtio_vsock *vsock;
        bool seqpacket_allow;
 
+       seqpacket_allow = false;
        rcu_read_lock();
        vsock = rcu_dereference(the_virtio_vsock);
-       seqpacket_allow = vsock->seqpacket_allow;
+       if (vsock)
+               seqpacket_allow = vsock->seqpacket_allow;
        rcu_read_unlock();
 
        return seqpacket_allow;
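
The hunk above closes a NULL dereference: the_virtio_vsock can be NULL while
the device is being removed, and the old code dereferenced it unconditionally.
A self-contained sketch of the corrected pattern, with the safe default chosen
before entering the read-side critical section:

        static bool transport_feature_allowed(void)
        {
                struct virtio_vsock *vsock;
                bool allowed = false;   /* default when the transport is gone */

                rcu_read_lock();
                vsock = rcu_dereference(the_virtio_vsock);
                if (vsock)              /* may be NULL during teardown */
                        allowed = vsock->seqpacket_allow;
                rcu_read_unlock();

                return allowed;
        }
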
index 23704a6..f014ccf 100644 (file)
@@ -413,7 +413,6 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
        struct virtio_vsock_pkt *pkt;
        int dequeued_len = 0;
        size_t user_buf_len = msg_data_left(msg);
-       bool copy_failed = false;
        bool msg_ready = false;
 
        spin_lock_bh(&vvs->rx_lock);
@@ -426,7 +425,7 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
        while (!msg_ready) {
                pkt = list_first_entry(&vvs->rx_queue, struct virtio_vsock_pkt, list);
 
-               if (!copy_failed) {
+               if (dequeued_len >= 0) {
                        size_t pkt_len;
                        size_t bytes_to_copy;
 
@@ -443,11 +442,9 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
 
                                err = memcpy_to_msg(msg, pkt->buf, bytes_to_copy);
                                if (err) {
-                                       /* Copy of message failed, set flag to skip
-                                        * copy path for rest of fragments. Rest of
+                                       /* Copy of message failed. Rest of
                                         * fragments will be freed without copy.
                                         */
-                                       copy_failed = true;
                                        dequeued_len = err;
                                } else {
                                        user_buf_len -= bytes_to_copy;
index 2eee939..af590ae 100644 (file)
@@ -28,7 +28,7 @@ $(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex)
        @$(kecho) "  GEN     $@"
        @(echo '#include "reg.h"'; \
          echo 'const u8 shipped_regdb_certs[] = {'; \
-         cat $^ ; \
+         echo | cat - $^ ; \
          echo '};'; \
          echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
         ) > $@
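
The Makefile tweak above is a robustness fix for the no-certificates case: with
no *.hex prerequisites, plain "cat $^" degenerates to "cat" and blocks reading
stdin, while "echo | cat - $^" always feeds cat a newline first. An empty certs
directory then still generates compilable output along these lines (the empty
initializer relying on the zero-length-array extension the kernel already
builds with):

        #include "reg.h"
        const u8 shipped_regdb_certs[] = {
        };
        unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);
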
index 285b807..869c43d 100644 (file)
@@ -6,7 +6,7 @@
  *
  * Copyright 2009      Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright 2018-2020 Intel Corporation
+ * Copyright 2018-2021 Intel Corporation
  */
 
 #include <linux/export.h>
@@ -942,7 +942,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
        struct ieee80211_sta_vht_cap *vht_cap;
        struct ieee80211_edmg *edmg_cap;
        u32 width, control_freq, cap;
-       bool support_80_80 = false;
+       bool ext_nss_cap, support_80_80 = false;
 
        if (WARN_ON(!cfg80211_chandef_valid(chandef)))
                return false;
@@ -950,6 +950,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
        ht_cap = &wiphy->bands[chandef->chan->band]->ht_cap;
        vht_cap = &wiphy->bands[chandef->chan->band]->vht_cap;
        edmg_cap = &wiphy->bands[chandef->chan->band]->edmg_cap;
+       ext_nss_cap = __le16_to_cpu(vht_cap->vht_mcs.tx_highest) &
+                       IEEE80211_VHT_EXT_NSS_BW_CAPABLE;
 
        if (edmg_cap->channels &&
            !cfg80211_edmg_usable(wiphy,
@@ -1015,7 +1017,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
                        (cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) ||
                        (cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
                         cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) ||
-                       u32_get_bits(cap, IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) > 1;
+                       (ext_nss_cap &&
+                        u32_get_bits(cap, IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) > 1);
                if (chandef->chan->band != NL80211_BAND_6GHZ && !support_80_80)
                        return false;
                fallthrough;
@@ -1037,7 +1040,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
                cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
                if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
                    cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ &&
-                   !(vht_cap->cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK))
+                   !(ext_nss_cap &&
+                     (vht_cap->cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK)))
                        return false;
                break;
        default:
@@ -1335,3 +1339,34 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
                WARN_ON(1);
        }
 }
+
+bool cfg80211_any_usable_channels(struct wiphy *wiphy,
+                                 unsigned long sband_mask,
+                                 u32 prohibited_flags)
+{
+       int idx;
+
+       prohibited_flags |= IEEE80211_CHAN_DISABLED;
+
+       for_each_set_bit(idx, &sband_mask, NUM_NL80211_BANDS) {
+               struct ieee80211_supported_band *sband = wiphy->bands[idx];
+               int chanidx;
+
+               if (!sband)
+                       continue;
+
+               for (chanidx = 0; chanidx < sband->n_channels; chanidx++) {
+                       struct ieee80211_channel *chan;
+
+                       chan = &sband->channels[chanidx];
+
+                       if (chan->flags & prohibited_flags)
+                               continue;
+
+                       return true;
+               }
+       }
+
+       return false;
+}
+EXPORT_SYMBOL(cfg80211_any_usable_channels);
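
A hypothetical caller of the new helper, asking whether a wiphy has any 5 or
6 GHz channel it could ever transmit on; the band mask is a BIT() mask over
enum nl80211_band, and the flag choice here is illustrative:

        if (!cfg80211_any_usable_channels(wiphy,
                                          BIT(NL80211_BAND_5GHZ) |
                                          BIT(NL80211_BAND_6GHZ),
                                          IEEE80211_CHAN_RADAR |
                                          IEEE80211_CHAN_NO_IR))
                return -EOPNOTSUPP;     /* nothing usable on those bands */

Note the helper unconditionally folds IEEE80211_CHAN_DISABLED into the
prohibited set, so disabled channels never count as usable.
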
index 6fbf753..0332312 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2006-2010         Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -532,11 +532,11 @@ use_default_name:
        wiphy_net_set(&rdev->wiphy, &init_net);
 
        rdev->rfkill_ops.set_block = cfg80211_rfkill_set_block;
-       rdev->rfkill = rfkill_alloc(dev_name(&rdev->wiphy.dev),
-                                  &rdev->wiphy.dev, RFKILL_TYPE_WLAN,
-                                  &rdev->rfkill_ops, rdev);
+       rdev->wiphy.rfkill = rfkill_alloc(dev_name(&rdev->wiphy.dev),
+                                         &rdev->wiphy.dev, RFKILL_TYPE_WLAN,
+                                         &rdev->rfkill_ops, rdev);
 
-       if (!rdev->rfkill) {
+       if (!rdev->wiphy.rfkill) {
                wiphy_free(&rdev->wiphy);
                return NULL;
        }
@@ -589,14 +589,6 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
                if (WARN_ON(!c->num_different_channels))
                        return -EINVAL;
 
-               /*
-                * Put a sane limit on maximum number of different
-                * channels to simplify channel accounting code.
-                */
-               if (WARN_ON(c->num_different_channels >
-                               CFG80211_MAX_NUM_DIFFERENT_CHANNELS))
-                       return -EINVAL;
-
                /* DFS only works on one channel. */
                if (WARN_ON(c->radar_detect_widths &&
                            (c->num_different_channels > 1)))
@@ -936,9 +928,6 @@ int wiphy_register(struct wiphy *wiphy)
                return res;
        }
 
-       /* set up regulatory info */
-       wiphy_regulatory_register(wiphy);
-
        list_add_rcu(&rdev->list, &cfg80211_rdev_list);
        cfg80211_rdev_list_generation++;
 
@@ -949,6 +938,9 @@ int wiphy_register(struct wiphy *wiphy)
        cfg80211_debugfs_rdev_add(rdev);
        nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
 
+       /* set up regulatory info */
+       wiphy_regulatory_register(wiphy);
+
        if (wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) {
                struct regulatory_request request;
 
@@ -993,10 +985,10 @@ int wiphy_register(struct wiphy *wiphy)
        rdev->wiphy.registered = true;
        rtnl_unlock();
 
-       res = rfkill_register(rdev->rfkill);
+       res = rfkill_register(rdev->wiphy.rfkill);
        if (res) {
-               rfkill_destroy(rdev->rfkill);
-               rdev->rfkill = NULL;
+               rfkill_destroy(rdev->wiphy.rfkill);
+               rdev->wiphy.rfkill = NULL;
                wiphy_unregister(&rdev->wiphy);
                return res;
        }
@@ -1012,18 +1004,10 @@ void wiphy_rfkill_start_polling(struct wiphy *wiphy)
        if (!rdev->ops->rfkill_poll)
                return;
        rdev->rfkill_ops.poll = cfg80211_rfkill_poll;
-       rfkill_resume_polling(rdev->rfkill);
+       rfkill_resume_polling(wiphy->rfkill);
 }
 EXPORT_SYMBOL(wiphy_rfkill_start_polling);
 
-void wiphy_rfkill_stop_polling(struct wiphy *wiphy)
-{
-       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
-
-       rfkill_pause_polling(rdev->rfkill);
-}
-EXPORT_SYMBOL(wiphy_rfkill_stop_polling);
-
 void wiphy_unregister(struct wiphy *wiphy)
 {
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
@@ -1035,8 +1019,8 @@ void wiphy_unregister(struct wiphy *wiphy)
                wiphy_unlock(&rdev->wiphy);
                __count == 0; }));
 
-       if (rdev->rfkill)
-               rfkill_unregister(rdev->rfkill);
+       if (rdev->wiphy.rfkill)
+               rfkill_unregister(rdev->wiphy.rfkill);
 
        rtnl_lock();
        wiphy_lock(&rdev->wiphy);
@@ -1088,7 +1072,7 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
 {
        struct cfg80211_internal_bss *scan, *tmp;
        struct cfg80211_beacon_registration *reg, *treg;
-       rfkill_destroy(rdev->rfkill);
+       rfkill_destroy(rdev->wiphy.rfkill);
        list_for_each_entry_safe(reg, treg, &rdev->beacon_registrations, list) {
                list_del(&reg->list);
                kfree(reg);
@@ -1110,7 +1094,7 @@ void wiphy_rfkill_set_hw_state_reason(struct wiphy *wiphy, bool blocked,
 {
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
-       if (rfkill_set_hw_state_reason(rdev->rfkill, blocked, reason))
+       if (rfkill_set_hw_state_reason(wiphy->rfkill, blocked, reason))
                schedule_work(&rdev->rfkill_block);
 }
 EXPORT_SYMBOL(wiphy_rfkill_set_hw_state_reason);
@@ -1340,6 +1324,11 @@ void cfg80211_register_wdev(struct cfg80211_registered_device *rdev,
        rdev->devlist_generation++;
        wdev->registered = true;
 
+       if (wdev->netdev &&
+           sysfs_create_link(&wdev->netdev->dev.kobj, &rdev->wiphy.dev.kobj,
+                             "phy80211"))
+               pr_err("failed to add phy80211 symlink to netdev!\n");
+
        nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
 }
 
@@ -1365,14 +1354,6 @@ int cfg80211_register_netdevice(struct net_device *dev)
        if (ret)
                goto out;
 
-       if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj,
-                             "phy80211")) {
-               pr_err("failed to add phy80211 symlink to netdev!\n");
-               unregister_netdevice(dev);
-               ret = -EINVAL;
-               goto out;
-       }
-
        cfg80211_register_wdev(rdev, wdev);
        ret = 0;
 out:
@@ -1506,7 +1487,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                                             wdev->use_4addr, 0))
                        return notifier_from_errno(-EOPNOTSUPP);
 
-               if (rfkill_blocked(rdev->rfkill))
+               if (rfkill_blocked(rdev->wiphy.rfkill))
                        return notifier_from_errno(-ERFKILL);
                break;
        default:
index a7d19b4..b35d0db 100644 (file)
@@ -3,7 +3,7 @@
  * Wireless configuration interface internals.
  *
  * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 #ifndef __NET_WIRELESS_CORE_H
 #define __NET_WIRELESS_CORE_H
@@ -27,7 +27,6 @@ struct cfg80211_registered_device {
 
        /* rfkill support */
        struct rfkill_ops rfkill_ops;
-       struct rfkill *rfkill;
        struct work_struct rfkill_block;
 
        /* ISO / IEC 3166 alpha2 for which this device is receiving
index fc9286a..50eb405 100644 (file)
@@ -330,7 +330,7 @@ nl80211_pmsr_req_attr_policy[NL80211_PMSR_REQ_ATTR_MAX + 1] = {
 };
 
 static const struct nla_policy
-nl80211_psmr_peer_attr_policy[NL80211_PMSR_PEER_ATTR_MAX + 1] = {
+nl80211_pmsr_peer_attr_policy[NL80211_PMSR_PEER_ATTR_MAX + 1] = {
        [NL80211_PMSR_PEER_ATTR_ADDR] = NLA_POLICY_ETH_ADDR,
        [NL80211_PMSR_PEER_ATTR_CHAN] = NLA_POLICY_NESTED(nl80211_policy),
        [NL80211_PMSR_PEER_ATTR_REQ] =
@@ -345,7 +345,7 @@ nl80211_pmsr_attr_policy[NL80211_PMSR_ATTR_MAX + 1] = {
        [NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR] = { .type = NLA_REJECT },
        [NL80211_PMSR_ATTR_TYPE_CAPA] = { .type = NLA_REJECT },
        [NL80211_PMSR_ATTR_PEERS] =
-               NLA_POLICY_NESTED_ARRAY(nl80211_psmr_peer_attr_policy),
+               NLA_POLICY_NESTED_ARRAY(nl80211_pmsr_peer_attr_policy),
 };
 
 static const struct nla_policy
@@ -1731,6 +1731,11 @@ nl80211_send_iftype_data(struct sk_buff *msg,
                    &iftdata->he_6ghz_capa))
                return -ENOBUFS;
 
+       if (iftdata->vendor_elems.data && iftdata->vendor_elems.len &&
+           nla_put(msg, NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS,
+                   iftdata->vendor_elems.len, iftdata->vendor_elems.data))
+               return -ENOBUFS;
+
        return 0;
 }
 
@@ -4781,11 +4786,10 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
                       sband->ht_cap.mcs.rx_mask,
                       sizeof(mask->control[i].ht_mcs));
 
-               if (!sband->vht_cap.vht_supported)
-                       continue;
-
-               vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
-               vht_build_mcs_mask(vht_tx_mcs_map, mask->control[i].vht_mcs);
+               if (sband->vht_cap.vht_supported) {
+                       vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+                       vht_build_mcs_mask(vht_tx_mcs_map, mask->control[i].vht_mcs);
+               }
 
                he_cap = ieee80211_get_he_iftype_cap(sband, wdev->iftype);
                if (!he_cap)
@@ -13042,7 +13046,7 @@ static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info)
        if (wdev_running(wdev))
                return 0;
 
-       if (rfkill_blocked(rdev->rfkill))
+       if (rfkill_blocked(rdev->wiphy.rfkill))
                return -ERFKILL;
 
        err = rdev_start_p2p_device(rdev, wdev);
@@ -13084,7 +13088,7 @@ static int nl80211_start_nan(struct sk_buff *skb, struct genl_info *info)
        if (wdev_running(wdev))
                return -EEXIST;
 
-       if (rfkill_blocked(rdev->rfkill))
+       if (rfkill_blocked(rdev->wiphy.rfkill))
                return -ERFKILL;
 
        if (!info->attrs[NL80211_ATTR_NAN_MASTER_PREF])
index 6bdd964..328cf54 100644 (file)
@@ -168,6 +168,18 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
                return -EINVAL;
        }
 
+       if (tb[NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR]) {
+               if (!out->ftm.non_trigger_based && !out->ftm.trigger_based) {
+                       NL_SET_ERR_MSG_ATTR(info->extack,
+                                           tb[NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR],
+                                           "FTM: BSS color set for EDCA based ranging");
+                       return -EINVAL;
+               }
+
+               out->ftm.bss_color =
+                       nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR]);
+       }
+
        return 0;
 }
 
@@ -334,6 +346,7 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
                            gfp_t gfp)
 {
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+       struct cfg80211_pmsr_request *tmp, *prev, *to_free = NULL;
        struct sk_buff *msg;
        void *hdr;
 
@@ -364,9 +377,20 @@ free_msg:
        nlmsg_free(msg);
 free_request:
        spin_lock_bh(&wdev->pmsr_lock);
-       list_del(&req->list);
+       /*
+        * cfg80211_pmsr_process_abort() may have already moved this request
+        * to the free list, and will free it later. In this case, don't free
+        * it here.
+        */
+       list_for_each_entry_safe(tmp, prev, &wdev->pmsr_list, list) {
+               if (tmp == req) {
+                       list_del(&req->list);
+                       to_free = req;
+                       break;
+               }
+       }
        spin_unlock_bh(&wdev->pmsr_lock);
-       kfree(req);
+       kfree(to_free);
 }
 EXPORT_SYMBOL_GPL(cfg80211_pmsr_complete);
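
The pmsr_complete hunk resolves a double free against the abort path. The
ownership rule after the fix, in brief:

        /* under wdev->pmsr_lock:
         *   req still on wdev->pmsr_list -> this path list_del()s it and
         *                                   owns the kfree (to_free = req)
         *   req already moved            -> cfg80211_pmsr_process_abort()
         *                                   owns the free; to_free stays NULL
         */

kfree(NULL) being a no-op keeps the common exit path branch-free.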
 
index 8b1358d..b1d37f5 100644 (file)
@@ -464,8 +464,18 @@ static inline int rdev_assoc(struct cfg80211_registered_device *rdev,
                             struct net_device *dev,
                             struct cfg80211_assoc_request *req)
 {
+       const struct cfg80211_bss_ies *bss_ies;
        int ret;
-       trace_rdev_assoc(&rdev->wiphy, dev, req);
+
+       /*
+        * Note: we may not trace exactly the data that gets processed,
+        * due to races and the driver/mac80211 getting a newer copy.
+        */
+       rcu_read_lock();
+       bss_ies = rcu_dereference(req->bss->ies);
+       trace_rdev_assoc(&rdev->wiphy, dev, req, bss_ies);
+       rcu_read_unlock();
+
        ret = rdev->ops->assoc(&rdev->wiphy, dev, req);
        trace_rdev_return_int(&rdev->wiphy, ret);
        return ret;
index 0406ce7..c2d0ff7 100644 (file)
@@ -3975,7 +3975,9 @@ static int __regulatory_set_wiphy_regd(struct wiphy *wiphy,
                 "wiphy should have REGULATORY_WIPHY_SELF_MANAGED\n"))
                return -EPERM;
 
-       if (WARN(!is_valid_rd(rd), "Invalid regulatory domain detected\n")) {
+       if (WARN(!is_valid_rd(rd),
+                "Invalid regulatory domain detected: %c%c\n",
+                rd->alpha2[0], rd->alpha2[1])) {
                print_regdomain_info(rd);
                return -EINVAL;
        }
@@ -4049,6 +4051,7 @@ void wiphy_regulatory_register(struct wiphy *wiphy)
 
        wiphy_update_regulatory(wiphy, lr->initiator);
        wiphy_all_share_dfs_chan_state(wiphy);
+       reg_process_self_managed_hints();
 }
 
 void wiphy_regulatory_deregister(struct wiphy *wiphy)
index 4f06c18..f03c7ac 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright 2016      Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -618,7 +618,7 @@ static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
 
                freq = ieee80211_channel_to_frequency(ap_info->channel, band);
 
-               if (end - pos < count * ap_info->tbtt_info_len)
+               if (end - pos < count * length)
                        break;
 
                /*
@@ -630,7 +630,7 @@ static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
                if (band != NL80211_BAND_6GHZ ||
                    (length != IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM &&
                     length < IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM)) {
-                       pos += count * ap_info->tbtt_info_len;
+                       pos += count * length;
                        continue;
                }
 
@@ -653,7 +653,7 @@ static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
                                kfree(entry);
                        }
 
-                       pos += ap_info->tbtt_info_len;
+                       pos += length;
                }
        }
 
@@ -757,7 +757,8 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
        }
 
        request = kzalloc(struct_size(request, channels, n_channels) +
-                         sizeof(*request->scan_6ghz_params) * count,
+                         sizeof(*request->scan_6ghz_params) * count +
+                         sizeof(*request->ssids) * rdev_req->n_ssids,
                          GFP_KERNEL);
        if (!request) {
                cfg80211_free_coloc_ap_list(&coloc_ap_list);
@@ -848,10 +849,19 @@ skip:
 
        if (request->n_channels) {
                struct cfg80211_scan_request *old = rdev->int_scan_req;
-
                rdev->int_scan_req = request;
 
                /*
+                * Add the ssids from the parent scan request to the new scan
+                * request, so the driver can use them in its probe
+                * requests to discover hidden APs on PSC channels.
+                */
+               request->ssids = (void *)&request->channels[request->n_channels];
+               request->n_ssids = rdev_req->n_ssids;
+               memcpy(request->ssids, rdev_req->ssids, sizeof(*request->ssids) *
+                      request->n_ssids);
+
+               /*
                 * If this scan follows a previous scan, save the scan start
                 * info from the first part of the scan
                 */
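
The scan.c hunks let the split-off 6 GHz request carry the parent request's
SSIDs, so drivers can send directed probes for hidden APs on PSC channels. The
SSID array costs no separate allocation; it reuses tail space reserved in the
kzalloc() shown above. The general shape of that one-allocation idiom, sketched
with the names from this function:

        request = kzalloc(struct_size(request, channels, n_channels) +
                          sizeof(*request->scan_6ghz_params) * count +
                          sizeof(*request->ssids) * rdev_req->n_ssids,
                          GFP_KERNEL);
        ...
        /* point the flexible tail storage past the filled channel array */
        request->ssids = (void *)&request->channels[request->n_channels];
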
index 9b959e3..0c3f05c 100644 (file)
@@ -133,6 +133,10 @@ static int wiphy_resume(struct device *dev)
        if (rdev->wiphy.registered && rdev->ops->resume)
                ret = rdev_resume(rdev);
        wiphy_unlock(&rdev->wiphy);
+
+       if (ret)
+               cfg80211_shutdown_all_interfaces(&rdev->wiphy);
+
        rtnl_unlock();
 
        return ret;
index 76b777d..440bce5 100644 (file)
@@ -1195,8 +1195,9 @@ TRACE_EVENT(rdev_auth,
 
 TRACE_EVENT(rdev_assoc,
        TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
-                struct cfg80211_assoc_request *req),
-       TP_ARGS(wiphy, netdev, req),
+                struct cfg80211_assoc_request *req,
+                const struct cfg80211_bss_ies *bss_ies),
+       TP_ARGS(wiphy, netdev, req, bss_ies),
        TP_STRUCT__entry(
                WIPHY_ENTRY
                NETDEV_ENTRY
@@ -1204,6 +1205,17 @@ TRACE_EVENT(rdev_assoc,
                MAC_ENTRY(prev_bssid)
                __field(bool, use_mfp)
                __field(u32, flags)
+               __dynamic_array(u8, bss_elements, bss_ies->len)
+               __field(bool, bss_elements_bcon)
+               __field(u64, bss_elements_tsf)
+               __dynamic_array(u8, elements, req->ie_len)
+               __array(u8, ht_capa, sizeof(struct ieee80211_ht_cap))
+               __array(u8, ht_capa_mask, sizeof(struct ieee80211_ht_cap))
+               __array(u8, vht_capa, sizeof(struct ieee80211_vht_cap))
+               __array(u8, vht_capa_mask, sizeof(struct ieee80211_vht_cap))
+               __dynamic_array(u8, fils_kek, req->fils_kek_len)
+               __dynamic_array(u8, fils_nonces,
+                               req->fils_nonces ? 2 * FILS_NONCE_LEN : 0)
        ),
        TP_fast_assign(
                WIPHY_ASSIGN;
@@ -1215,6 +1227,26 @@ TRACE_EVENT(rdev_assoc,
                MAC_ASSIGN(prev_bssid, req->prev_bssid);
                __entry->use_mfp = req->use_mfp;
                __entry->flags = req->flags;
+               if (bss_ies->len)
+                       memcpy(__get_dynamic_array(bss_elements),
+                              bss_ies->data, bss_ies->len);
+               __entry->bss_elements_bcon = bss_ies->from_beacon;
+               __entry->bss_elements_tsf = bss_ies->tsf;
+               if (req->ie)
+                       memcpy(__get_dynamic_array(elements),
+                              req->ie, req->ie_len);
+               memcpy(__entry->ht_capa, &req->ht_capa, sizeof(req->ht_capa));
+               memcpy(__entry->ht_capa_mask, &req->ht_capa_mask,
+                      sizeof(req->ht_capa_mask));
+               memcpy(__entry->vht_capa, &req->vht_capa, sizeof(req->vht_capa));
+               memcpy(__entry->vht_capa_mask, &req->vht_capa_mask,
+                      sizeof(req->vht_capa_mask));
+               if (req->fils_kek)
+                       memcpy(__get_dynamic_array(fils_kek),
+                              req->fils_kek, req->fils_kek_len);
+               if (req->fils_nonces)
+                       memcpy(__get_dynamic_array(fils_nonces),
+                              req->fils_nonces, 2 * FILS_NONCE_LEN);
        ),
        TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT
                  ", previous bssid: " MAC_PR_FMT ", use mfp: %s, flags: %u",
index 7ec021a..18dba3d 100644 (file)
@@ -1059,6 +1059,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
                case NL80211_IFTYPE_MESH_POINT:
                        /* mesh should be handled? */
                        break;
+               case NL80211_IFTYPE_OCB:
+                       cfg80211_leave_ocb(rdev, dev);
+                       break;
                default:
                        break;
                }
index a8320dc..a32065d 100644 (file)
@@ -902,7 +902,7 @@ static int cfg80211_wext_siwtxpower(struct net_device *dev,
 
        /* only change when not disabling */
        if (!data->txpower.disabled) {
-               rfkill_set_sw_state(rdev->rfkill, false);
+               rfkill_set_sw_state(rdev->wiphy.rfkill, false);
 
                if (data->txpower.fixed) {
                        /*
@@ -927,7 +927,7 @@ static int cfg80211_wext_siwtxpower(struct net_device *dev,
                        }
                }
        } else {
-               if (rfkill_set_sw_state(rdev->rfkill, true))
+               if (rfkill_set_sw_state(rdev->wiphy.rfkill, true))
                        schedule_work(&rdev->rfkill_block);
                return 0;
        }
@@ -963,7 +963,7 @@ static int cfg80211_wext_giwtxpower(struct net_device *dev,
 
        /* well... oh well */
        data->txpower.fixed = 1;
-       data->txpower.disabled = rfkill_blocked(rdev->rfkill);
+       data->txpower.disabled = rfkill_blocked(rdev->wiphy.rfkill);
        data->txpower.value = val;
        data->txpower.flags = IW_TXPOW_DBM;
 
@@ -1167,7 +1167,7 @@ static int cfg80211_wext_siwpower(struct net_device *dev,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
-       bool ps = wdev->ps;
+       bool ps;
        int timeout = wdev->ps_timeout;
        int err;
 
index 33bef22..b379a03 100644 (file)
@@ -120,8 +120,8 @@ int iw_handler_set_thrspy(struct net_device *       dev,
                return -EOPNOTSUPP;
 
        /* Just do it */
-       memcpy(&(spydata->spy_thr_low), &(threshold->low),
-              2 * sizeof(struct iw_quality));
+       spydata->spy_thr_low = threshold->low;
+       spydata->spy_thr_high = threshold->high;
 
        /* Clear flag */
        memset(spydata->spy_thr_under, '\0', sizeof(spydata->spy_thr_under));
@@ -147,8 +147,8 @@ int iw_handler_get_thrspy(struct net_device *       dev,
                return -EOPNOTSUPP;
 
        /* Just do it */
-       memcpy(&(threshold->low), &(spydata->spy_thr_low),
-              2 * sizeof(struct iw_quality));
+       threshold->low = spydata->spy_thr_low;
+       threshold->high = spydata->spy_thr_high;
 
        return 0;
 }
@@ -173,10 +173,10 @@ static void iw_send_thrspy_event(struct net_device *      dev,
        memcpy(threshold.addr.sa_data, address, ETH_ALEN);
        threshold.addr.sa_family = ARPHRD_ETHER;
        /* Copy stats */
-       memcpy(&(threshold.qual), wstats, sizeof(struct iw_quality));
+       threshold.qual = *wstats;
        /* Copy also thresholds */
-       memcpy(&(threshold.low), &(spydata->spy_thr_low),
-              2 * sizeof(struct iw_quality));
+       threshold.low = spydata->spy_thr_low;
+       threshold.high = spydata->spy_thr_high;
 
        /* Send event to user space */
        wireless_send_event(dev, SIOCGIWTHRSPY, &wrqu, (char *) &threshold);
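
The wext-spy hunks all make the same substitution: a memcpy() sized
"2 * sizeof(struct iw_quality)" copied spy_thr_low and silently relied on
spy_thr_high sitting immediately behind it, which newer compiler bounds
checking flags as a read past the named member. Side by side:

        /* old: one copy spanning two adjacent members (layout assumption) */
        memcpy(&threshold.low, &spydata->spy_thr_low,
               2 * sizeof(struct iw_quality));

        /* new: two struct assignments, same effect, no assumption */
        threshold.low  = spydata->spy_thr_low;
        threshold.high = spydata->spy_thr_high;
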
index ce66323..d12bb90 100644 (file)
@@ -131,6 +131,13 @@ __xfrm_spi_hash(const xfrm_address_t *daddr, __be32 spi, u8 proto,
        return (h ^ (h >> 10) ^ (h >> 20)) & hmask;
 }
 
+static inline unsigned int
+__xfrm_seq_hash(u32 seq, unsigned int hmask)
+{
+       unsigned int h = seq;
+       return (h ^ (h >> 10) ^ (h >> 20)) & hmask;
+}
+
 static inline unsigned int __idx_hash(u32 index, unsigned int hmask)
 {
        return (index ^ (index >> 8)) & hmask;
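
__xfrm_seq_hash() reuses the xor-fold from __xfrm_spi_hash() directly above:
the high bits of the 32-bit sequence number are folded down before masking, so
mostly-monotonic sequence values still spread across the hash buckets:

        h = seq;
        h = h ^ (h >> 10) ^ (h >> 20);  /* fold bits 10..31 downward */
        bucket = h & hmask;             /* hmask == table size - 1 */
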
index 1158cd0..3df0861 100644 (file)
@@ -612,7 +612,7 @@ lock:
                        goto drop_unlock;
                }
 
-               if (x->repl->check(x, skb, seq)) {
+               if (xfrm_replay_check(x, skb, seq)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
                        goto drop_unlock;
                }
@@ -660,12 +660,12 @@ resume:
                /* only the first xfrm gets the encap type */
                encap_type = 0;
 
-               if (x->repl->recheck(x, skb, seq)) {
+               if (xfrm_replay_recheck(x, skb, seq)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
                        goto drop_unlock;
                }
 
-               x->repl->advance(x, seq);
+               xfrm_replay_advance(x, seq);
 
                x->curlft.bytes += skb->len;
                x->curlft.packets++;
index e4cb0ff..ab2fbe4 100644 (file)
@@ -77,6 +77,83 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
        return 0;
 }
 
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
+static int mip6_rthdr_offset(struct sk_buff *skb, u8 **nexthdr, int type)
+{
+       const unsigned char *nh = skb_network_header(skb);
+       unsigned int offset = sizeof(struct ipv6hdr);
+       unsigned int packet_len;
+       int found_rhdr = 0;
+
+       packet_len = skb_tail_pointer(skb) - nh;
+       *nexthdr = &ipv6_hdr(skb)->nexthdr;
+
+       while (offset <= packet_len) {
+               struct ipv6_opt_hdr *exthdr;
+
+               switch (**nexthdr) {
+               case NEXTHDR_HOP:
+                       break;
+               case NEXTHDR_ROUTING:
+                       if (type == IPPROTO_ROUTING && offset + 3 <= packet_len) {
+                               struct ipv6_rt_hdr *rt;
+
+                               rt = (struct ipv6_rt_hdr *)(nh + offset);
+                               if (rt->type != 0)
+                                       return offset;
+                       }
+                       found_rhdr = 1;
+                       break;
+               case NEXTHDR_DEST:
+                       /* HAO MUST NOT appear more than once.
+                        * XXX: It is better to try to find by the end of
+                        * XXX: packet if HAO exists.
+                        */
+                       if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) {
+                               net_dbg_ratelimited("mip6: hao exists already, override\n");
+                               return offset;
+                       }
+
+                       if (found_rhdr)
+                               return offset;
+
+                       break;
+               default:
+                       return offset;
+               }
+
+               if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
+                       return -EINVAL;
+
+               exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
+                                                offset);
+               offset += ipv6_optlen(exthdr);
+               if (offset > IPV6_MAXPLEN)
+                       return -EINVAL;
+               *nexthdr = &exthdr->nexthdr;
+       }
+
+       return -EINVAL;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int xfrm6_hdr_offset(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr)
+{
+       switch (x->type->proto) {
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
+       case IPPROTO_DSTOPTS:
+       case IPPROTO_ROUTING:
+               return mip6_rthdr_offset(skb, prevhdr, x->type->proto);
+#endif
+       default:
+               break;
+       }
+
+       return ip6_find_1stfragopt(skb, prevhdr);
+}
+#endif
+
 /* Add encapsulation header.
  *
  * The IP header and mutable extension headers will be moved forward to make
@@ -92,7 +169,7 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
        iph = ipv6_hdr(skb);
        skb_set_inner_transport_header(skb, skb_transport_offset(skb));
 
-       hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+       hdr_len = xfrm6_hdr_offset(x, skb, &prevhdr);
        if (hdr_len < 0)
                return hdr_len;
        skb_set_mac_header(skb,
@@ -122,7 +199,7 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
 
        iph = ipv6_hdr(skb);
 
-       hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+       hdr_len = xfrm6_hdr_offset(x, skb, &prevhdr);
        if (hdr_len < 0)
                return hdr_len;
        skb_set_mac_header(skb,
@@ -448,7 +525,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
                        goto error;
                }
 
-               err = x->repl->overflow(x, skb);
+               err = xfrm_replay_overflow(x, skb);
                if (err) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
                        goto error;
@@ -565,6 +642,42 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb
        return 0;
 }
 
+/* For partial checksum offload, the outer header checksum is calculated
+ * by software and the inner header checksum is calculated by hardware.
+ * This requires hardware to know the inner packet type to calculate
+ * the inner header checksum. Save inner ip protocol here to avoid
+ * traversing the packet in the vendor's xmit code.
+ * If the encap type is IPIP, just save skb->inner_ipproto. Otherwise,
+ * get the ip protocol from the IP header.
+ */
+static void xfrm_get_inner_ipproto(struct sk_buff *skb)
+{
+       struct xfrm_offload *xo = xfrm_offload(skb);
+       const struct ethhdr *eth;
+
+       if (!xo)
+               return;
+
+       if (skb->inner_protocol_type == ENCAP_TYPE_IPPROTO) {
+               xo->inner_ipproto = skb->inner_ipproto;
+               return;
+       }
+
+       if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
+               return;
+
+       eth = (struct ethhdr *)skb_inner_mac_header(skb);
+
+       switch (ntohs(eth->h_proto)) {
+       case ETH_P_IPV6:
+               xo->inner_ipproto = inner_ipv6_hdr(skb)->nexthdr;
+               break;
+       case ETH_P_IP:
+               xo->inner_ipproto = inner_ip_hdr(skb)->protocol;
+               break;
+       }
+}
+
 int xfrm_output(struct sock *sk, struct sk_buff *skb)
 {
        struct net *net = dev_net(skb_dst(skb)->dev);
@@ -594,12 +707,15 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
                        kfree_skb(skb);
                        return -ENOMEM;
                }
-               skb->encapsulation = 1;
 
                sp->olen++;
                sp->xvec[sp->len++] = x;
                xfrm_state_hold(x);
 
+               if (skb->encapsulation)
+                       xfrm_get_inner_ipproto(skb);
+               skb->encapsulation = 1;
+
                if (skb_is_gso(skb)) {
                        if (skb->inner_protocol)
                                return xfrm_output_gso(net, sk, skb);
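
[Editor's note] The comment block introducing xfrm_get_inner_ipproto() above is the heart of this file's change: for partial checksum offload the driver needs the inner L4 protocol without reparsing the packet, and when the inner encapsulation is Ethernet that protocol is selected by the inner ethertype. The dispatch, reduced to a standalone sketch (struct layouts and helper names here are simplified stand-ins):

        #include <stdint.h>
        #include <stdio.h>
        #include <arpa/inet.h>  /* ntohs/htons */

        #define ETH_P_IP        0x0800
        #define ETH_P_IPV6      0x86DD

        struct eth_hdr { uint8_t dst[6], src[6]; uint16_t h_proto; }; /* big-endian on the wire */

        /* pick the inner L4 protocol from the inner headers, as the patch
         * does for ENCAP_TYPE_ETHER packets */
        static uint8_t inner_ipproto(const struct eth_hdr *eth,
                                     uint8_t v4_protocol, uint8_t v6_nexthdr)
        {
                switch (ntohs(eth->h_proto)) {
                case ETH_P_IP:
                        return v4_protocol;     /* iphdr->protocol */
                case ETH_P_IPV6:
                        return v6_nexthdr;      /* ipv6hdr->nexthdr */
                default:
                        return 0;               /* unknown: leave unset */
                }
        }

        int main(void)
        {
                struct eth_hdr e = { .h_proto = htons(ETH_P_IPV6) };

                printf("inner proto: %u\n", inner_ipproto(&e, 6 /* TCP */, 17 /* UDP */));
                return 0;
        }

Note also the reordering in xfrm_output(): the old `skb->encapsulation` flag is consulted before the line that unconditionally sets it, since reading the flag after overwriting it would make the check meaningless.
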
index ce500f8..1e24b21 100644 (file)
@@ -3247,7 +3247,7 @@ xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
 
 /*
  * 0 or more than 0 is returned when validation is succeeded (either bypass
- * because of optional transport mode, or next index of the mathced secpath
+ * because of optional transport mode, or next index of the matched secpath
  * state with the template.
  * -1 is returned when no matching template is found.
  * Otherwise "-2 - errored_index" is returned.
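
[Editor's note] The return convention documented above packs an error index into the negative return space while keeping -1 free to mean "no match": index i is encoded as -2 - i, so -2 is index 0, -3 is index 1, and so on. Decoding is the mirror image; a tiny sketch:

        #include <stdio.h>

        #define NO_MATCH        (-1)

        static int encode_err_idx(int idx) { return -2 - idx; } /* 0 -> -2, 1 -> -3, ... */
        static int decode_err_idx(int ret) { return -2 - ret; }

        int main(void)
        {
                int ret = encode_err_idx(3);

                if (ret >= 0)
                        printf("matched, next index %d\n", ret);
                else if (ret == NO_MATCH)
                        printf("no matching template\n");
                else
                        printf("error at template index %d\n", decode_err_idx(ret));
                return 0;
        }
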
index c6a4338..9277d81 100644 (file)
@@ -34,8 +34,11 @@ u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq)
        return seq_hi;
 }
 EXPORT_SYMBOL(xfrm_replay_seqhi);
-;
-static void xfrm_replay_notify(struct xfrm_state *x, int event)
+
+static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event);
+static void xfrm_replay_notify_esn(struct xfrm_state *x, int event);
+
+void xfrm_replay_notify(struct xfrm_state *x, int event)
 {
        struct km_event c;
        /* we send notify messages in case
@@ -48,6 +51,17 @@ static void xfrm_replay_notify(struct xfrm_state *x, int event)
         *  The state structure must be locked!
         */
 
+       switch (x->repl_mode) {
+       case XFRM_REPLAY_MODE_LEGACY:
+               break;
+       case XFRM_REPLAY_MODE_BMP:
+               xfrm_replay_notify_bmp(x, event);
+               return;
+       case XFRM_REPLAY_MODE_ESN:
+               xfrm_replay_notify_esn(x, event);
+               return;
+       }
+
        switch (event) {
        case XFRM_REPLAY_UPDATE:
                if (!x->replay_maxdiff ||
@@ -81,7 +95,7 @@ static void xfrm_replay_notify(struct xfrm_state *x, int event)
                x->xflags &= ~XFRM_TIME_DEFER;
 }
 
-static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
+static int __xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
 {
        int err = 0;
        struct net *net = xs_net(x);
@@ -98,14 +112,14 @@ static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
                        return err;
                }
                if (xfrm_aevent_is_on(net))
-                       x->repl->notify(x, XFRM_REPLAY_UPDATE);
+                       xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
        }
 
        return err;
 }
 
-static int xfrm_replay_check(struct xfrm_state *x,
-                     struct sk_buff *skb, __be32 net_seq)
+static int xfrm_replay_check_legacy(struct xfrm_state *x,
+                                   struct sk_buff *skb, __be32 net_seq)
 {
        u32 diff;
        u32 seq = ntohl(net_seq);
@@ -136,14 +150,26 @@ err:
        return -EINVAL;
 }
 
-static void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
+static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq);
+static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq);
+
+void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
 {
-       u32 diff;
-       u32 seq = ntohl(net_seq);
+       u32 diff, seq;
+
+       switch (x->repl_mode) {
+       case XFRM_REPLAY_MODE_LEGACY:
+               break;
+       case XFRM_REPLAY_MODE_BMP:
+               return xfrm_replay_advance_bmp(x, net_seq);
+       case XFRM_REPLAY_MODE_ESN:
+               return xfrm_replay_advance_esn(x, net_seq);
+       }
 
        if (!x->props.replay_window)
                return;
 
+       seq = ntohl(net_seq);
        if (seq > x->replay.seq) {
                diff = seq - x->replay.seq;
                if (diff < x->props.replay_window)
@@ -157,7 +183,7 @@ static void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
        }
 
        if (xfrm_aevent_is_on(xs_net(x)))
-               x->repl->notify(x, XFRM_REPLAY_UPDATE);
+               xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
 }
 
 static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb)
@@ -178,7 +204,7 @@ static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb)
                        return err;
                }
                if (xfrm_aevent_is_on(net))
-                       x->repl->notify(x, XFRM_REPLAY_UPDATE);
+                       xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
        }
 
        return err;
@@ -273,7 +299,7 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
        replay_esn->bmp[nr] |= (1U << bitnr);
 
        if (xfrm_aevent_is_on(xs_net(x)))
-               x->repl->notify(x, XFRM_REPLAY_UPDATE);
+               xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
 }
 
 static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
@@ -416,7 +442,7 @@ static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb)
                        }
                }
                if (xfrm_aevent_is_on(net))
-                       x->repl->notify(x, XFRM_REPLAY_UPDATE);
+                       xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
        }
 
        return err;
@@ -481,6 +507,21 @@ err:
        return -EINVAL;
 }
 
+int xfrm_replay_check(struct xfrm_state *x,
+                     struct sk_buff *skb, __be32 net_seq)
+{
+       switch (x->repl_mode) {
+       case XFRM_REPLAY_MODE_LEGACY:
+               break;
+       case XFRM_REPLAY_MODE_BMP:
+               return xfrm_replay_check_bmp(x, skb, net_seq);
+       case XFRM_REPLAY_MODE_ESN:
+               return xfrm_replay_check_esn(x, skb, net_seq);
+       }
+
+       return xfrm_replay_check_legacy(x, skb, net_seq);
+}
+
 static int xfrm_replay_recheck_esn(struct xfrm_state *x,
                                   struct sk_buff *skb, __be32 net_seq)
 {
@@ -493,6 +534,22 @@ static int xfrm_replay_recheck_esn(struct xfrm_state *x,
        return xfrm_replay_check_esn(x, skb, net_seq);
 }
 
+int xfrm_replay_recheck(struct xfrm_state *x,
+                       struct sk_buff *skb, __be32 net_seq)
+{
+       switch (x->repl_mode) {
+       case XFRM_REPLAY_MODE_LEGACY:
+               break;
+       case XFRM_REPLAY_MODE_BMP:
+               /* no special recheck treatment */
+               return xfrm_replay_check_bmp(x, skb, net_seq);
+       case XFRM_REPLAY_MODE_ESN:
+               return xfrm_replay_recheck_esn(x, skb, net_seq);
+       }
+
+       return xfrm_replay_check_legacy(x, skb, net_seq);
+}
+
 static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
 {
        unsigned int bitnr, nr, i;
@@ -548,7 +605,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
        replay_esn->bmp[nr] |= (1U << bitnr);
 
        if (xfrm_aevent_is_on(xs_net(x)))
-               x->repl->notify(x, XFRM_REPLAY_UPDATE);
+               xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
 }
 
 #ifdef CONFIG_XFRM_OFFLOAD
@@ -560,7 +617,7 @@ static int xfrm_replay_overflow_offload(struct xfrm_state *x, struct sk_buff *sk
        __u32 oseq = x->replay.oseq;
 
        if (!xo)
-               return xfrm_replay_overflow(x, skb);
+               return __xfrm_replay_overflow(x, skb);
 
        if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
                if (!skb_is_gso(skb)) {
@@ -585,7 +642,7 @@ static int xfrm_replay_overflow_offload(struct xfrm_state *x, struct sk_buff *sk
                x->replay.oseq = oseq;
 
                if (xfrm_aevent_is_on(net))
-                       x->repl->notify(x, XFRM_REPLAY_UPDATE);
+                       xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
        }
 
        return err;
@@ -625,7 +682,7 @@ static int xfrm_replay_overflow_offload_bmp(struct xfrm_state *x, struct sk_buff
                }
 
                if (xfrm_aevent_is_on(net))
-                       x->repl->notify(x, XFRM_REPLAY_UPDATE);
+                       xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
        }
 
        return err;
@@ -674,59 +731,39 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
                replay_esn->oseq = oseq;
 
                if (xfrm_aevent_is_on(net))
-                       x->repl->notify(x, XFRM_REPLAY_UPDATE);
+                       xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
        }
 
        return err;
 }
 
-static const struct xfrm_replay xfrm_replay_legacy = {
-       .advance        = xfrm_replay_advance,
-       .check          = xfrm_replay_check,
-       .recheck        = xfrm_replay_check,
-       .notify         = xfrm_replay_notify,
-       .overflow       = xfrm_replay_overflow_offload,
-};
-
-static const struct xfrm_replay xfrm_replay_bmp = {
-       .advance        = xfrm_replay_advance_bmp,
-       .check          = xfrm_replay_check_bmp,
-       .recheck        = xfrm_replay_check_bmp,
-       .notify         = xfrm_replay_notify_bmp,
-       .overflow       = xfrm_replay_overflow_offload_bmp,
-};
-
-static const struct xfrm_replay xfrm_replay_esn = {
-       .advance        = xfrm_replay_advance_esn,
-       .check          = xfrm_replay_check_esn,
-       .recheck        = xfrm_replay_recheck_esn,
-       .notify         = xfrm_replay_notify_esn,
-       .overflow       = xfrm_replay_overflow_offload_esn,
-};
+int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
+{
+       switch (x->repl_mode) {
+       case XFRM_REPLAY_MODE_LEGACY:
+               break;
+       case XFRM_REPLAY_MODE_BMP:
+               return xfrm_replay_overflow_offload_bmp(x, skb);
+       case XFRM_REPLAY_MODE_ESN:
+               return xfrm_replay_overflow_offload_esn(x, skb);
+       }
+
+       return xfrm_replay_overflow_offload(x, skb);
+}
 #else
-static const struct xfrm_replay xfrm_replay_legacy = {
-       .advance        = xfrm_replay_advance,
-       .check          = xfrm_replay_check,
-       .recheck        = xfrm_replay_check,
-       .notify         = xfrm_replay_notify,
-       .overflow       = xfrm_replay_overflow,
-};
-
-static const struct xfrm_replay xfrm_replay_bmp = {
-       .advance        = xfrm_replay_advance_bmp,
-       .check          = xfrm_replay_check_bmp,
-       .recheck        = xfrm_replay_check_bmp,
-       .notify         = xfrm_replay_notify_bmp,
-       .overflow       = xfrm_replay_overflow_bmp,
-};
-
-static const struct xfrm_replay xfrm_replay_esn = {
-       .advance        = xfrm_replay_advance_esn,
-       .check          = xfrm_replay_check_esn,
-       .recheck        = xfrm_replay_recheck_esn,
-       .notify         = xfrm_replay_notify_esn,
-       .overflow       = xfrm_replay_overflow_esn,
-};
+int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
+{
+       switch (x->repl_mode) {
+       case XFRM_REPLAY_MODE_LEGACY:
+               break;
+       case XFRM_REPLAY_MODE_BMP:
+               return xfrm_replay_overflow_bmp(x, skb);
+       case XFRM_REPLAY_MODE_ESN:
+               return xfrm_replay_overflow_esn(x, skb);
+       }
+
+       return __xfrm_replay_overflow(x, skb);
+}
 #endif
 
 int xfrm_init_replay(struct xfrm_state *x)
@@ -741,12 +778,12 @@ int xfrm_init_replay(struct xfrm_state *x)
                if (x->props.flags & XFRM_STATE_ESN) {
                        if (replay_esn->replay_window == 0)
                                return -EINVAL;
-                       x->repl = &xfrm_replay_esn;
+                       x->repl_mode = XFRM_REPLAY_MODE_ESN;
                } else {
-                       x->repl = &xfrm_replay_bmp;
+                       x->repl_mode = XFRM_REPLAY_MODE_BMP;
                }
        } else {
-               x->repl = &xfrm_replay_legacy;
+               x->repl_mode = XFRM_REPLAY_MODE_LEGACY;
        }
 
        return 0;
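
[Editor's note] Taken together, the xfrm_replay.c hunks replace a per-state ops table (the old x->repl->check(), ->advance(), ->notify(), ->overflow() pointers) with an enum stored in the state plus a switch in a handful of exported entry points. Direct calls can be inlined and, plausibly, avoid the cost of indirect branches; the legacy mode falls through the switch to keep it the default. The shape of the refactor as a standalone sketch:

        #include <stdio.h>

        enum replay_mode { MODE_LEGACY, MODE_BMP, MODE_ESN };

        struct state { enum replay_mode repl_mode; };

        static int check_legacy(struct state *x, unsigned int s) { (void)x; puts("legacy"); return s ? 0 : -1; }
        static int check_bmp(struct state *x, unsigned int s)    { (void)x; puts("bmp");    return s ? 0 : -1; }
        static int check_esn(struct state *x, unsigned int s)    { (void)x; puts("esn");    return s ? 0 : -1; }

        /* one exported entry point dispatching on the mode, replacing
         * the old per-state function pointer */
        static int replay_check(struct state *x, unsigned int seq)
        {
                switch (x->repl_mode) {
                case MODE_BMP:
                        return check_bmp(x, seq);
                case MODE_ESN:
                        return check_esn(x, seq);
                case MODE_LEGACY:
                default:
                        break;          /* fall through to the default backend */
                }
                return check_legacy(x, seq);
        }

        int main(void)
        {
                struct state x = { .repl_mode = MODE_ESN };

                printf("ok=%d\n", replay_check(&x, 42));
                return 0;
        }
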
index 4496f7e..c2ce1e6 100644 (file)
@@ -78,10 +78,16 @@ xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
        return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
 }
 
+static unsigned int xfrm_seq_hash(struct net *net, u32 seq)
+{
+       return __xfrm_seq_hash(seq, net->xfrm.state_hmask);
+}
+
 static void xfrm_hash_transfer(struct hlist_head *list,
                               struct hlist_head *ndsttable,
                               struct hlist_head *nsrctable,
                               struct hlist_head *nspitable,
+                              struct hlist_head *nseqtable,
                               unsigned int nhashmask)
 {
        struct hlist_node *tmp;
@@ -106,6 +112,11 @@ static void xfrm_hash_transfer(struct hlist_head *list,
                                            nhashmask);
                        hlist_add_head_rcu(&x->byspi, nspitable + h);
                }
+
+               if (x->km.seq) {
+                       h = __xfrm_seq_hash(x->km.seq, nhashmask);
+                       hlist_add_head_rcu(&x->byseq, nseqtable + h);
+               }
        }
 }
 
@@ -117,7 +128,7 @@ static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
 static void xfrm_hash_resize(struct work_struct *work)
 {
        struct net *net = container_of(work, struct net, xfrm.state_hash_work);
-       struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
+       struct hlist_head *ndst, *nsrc, *nspi, *nseq, *odst, *osrc, *ospi, *oseq;
        unsigned long nsize, osize;
        unsigned int nhashmask, ohashmask;
        int i;
@@ -137,6 +148,13 @@ static void xfrm_hash_resize(struct work_struct *work)
                xfrm_hash_free(nsrc, nsize);
                return;
        }
+       nseq = xfrm_hash_alloc(nsize);
+       if (!nseq) {
+               xfrm_hash_free(ndst, nsize);
+               xfrm_hash_free(nsrc, nsize);
+               xfrm_hash_free(nspi, nsize);
+               return;
+       }
 
        spin_lock_bh(&net->xfrm.xfrm_state_lock);
        write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
@@ -144,15 +162,17 @@ static void xfrm_hash_resize(struct work_struct *work)
        nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
        odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
        for (i = net->xfrm.state_hmask; i >= 0; i--)
-               xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nhashmask);
+               xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nseq, nhashmask);
 
        osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
        ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
+       oseq = xfrm_state_deref_prot(net->xfrm.state_byseq, net);
        ohashmask = net->xfrm.state_hmask;
 
        rcu_assign_pointer(net->xfrm.state_bydst, ndst);
        rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
        rcu_assign_pointer(net->xfrm.state_byspi, nspi);
+       rcu_assign_pointer(net->xfrm.state_byseq, nseq);
        net->xfrm.state_hmask = nhashmask;
 
        write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
@@ -165,6 +185,7 @@ static void xfrm_hash_resize(struct work_struct *work)
        xfrm_hash_free(odst, osize);
        xfrm_hash_free(osrc, osize);
        xfrm_hash_free(ospi, osize);
+       xfrm_hash_free(oseq, osize);
 }
 
 static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
@@ -621,6 +642,7 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
                INIT_HLIST_NODE(&x->bydst);
                INIT_HLIST_NODE(&x->bysrc);
                INIT_HLIST_NODE(&x->byspi);
+               INIT_HLIST_NODE(&x->byseq);
                hrtimer_init(&x->mtimer, CLOCK_BOOTTIME, HRTIMER_MODE_ABS_SOFT);
                x->mtimer.function = xfrm_timer_handler;
                timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
@@ -664,6 +686,8 @@ int __xfrm_state_delete(struct xfrm_state *x)
                list_del(&x->km.all);
                hlist_del_rcu(&x->bydst);
                hlist_del_rcu(&x->bysrc);
+               if (x->km.seq)
+                       hlist_del_rcu(&x->byseq);
                if (x->id.spi)
                        hlist_del_rcu(&x->byspi);
                net->xfrm.state_num--;
@@ -1148,6 +1172,10 @@ found:
                                h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
                                hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
                        }
+                       if (x->km.seq) {
+                               h = xfrm_seq_hash(net, x->km.seq);
+                               hlist_add_head_rcu(&x->byseq, net->xfrm.state_byseq + h);
+                       }
                        x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
                        hrtimer_start(&x->mtimer,
                                      ktime_set(net->xfrm.sysctl_acq_expires, 0),
@@ -1263,6 +1291,12 @@ static void __xfrm_state_insert(struct xfrm_state *x)
                hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
        }
 
+       if (x->km.seq) {
+               h = xfrm_seq_hash(net, x->km.seq);
+
+               hlist_add_head_rcu(&x->byseq, net->xfrm.state_byseq + h);
+       }
+
        hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
        if (x->replay_maxage)
                mod_timer(&x->rtimer, jiffies + x->replay_maxage);
@@ -1932,20 +1966,18 @@ xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
 
 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
 {
-       int i;
-
-       for (i = 0; i <= net->xfrm.state_hmask; i++) {
-               struct xfrm_state *x;
+       unsigned int h = xfrm_seq_hash(net, seq);
+       struct xfrm_state *x;
 
-               hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
-                       if (x->km.seq == seq &&
-                           (mark & x->mark.m) == x->mark.v &&
-                           x->km.state == XFRM_STATE_ACQ) {
-                               xfrm_state_hold(x);
-                               return x;
-                       }
+       hlist_for_each_entry_rcu(x, net->xfrm.state_byseq + h, byseq) {
+               if (x->km.seq == seq &&
+                   (mark & x->mark.m) == x->mark.v &&
+                   x->km.state == XFRM_STATE_ACQ) {
+                       xfrm_state_hold(x);
+                       return x;
                }
        }
+
        return NULL;
 }
 
@@ -2145,7 +2177,7 @@ static void xfrm_replay_timer_handler(struct timer_list *t)
 
        if (x->km.state == XFRM_STATE_VALID) {
                if (xfrm_aevent_is_on(xs_net(x)))
-                       x->repl->notify(x, XFRM_REPLAY_TIMEOUT);
+                       xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
                else
                        x->xflags |= XFRM_TIME_DEFER;
        }
@@ -2660,6 +2692,9 @@ int __net_init xfrm_state_init(struct net *net)
        net->xfrm.state_byspi = xfrm_hash_alloc(sz);
        if (!net->xfrm.state_byspi)
                goto out_byspi;
+       net->xfrm.state_byseq = xfrm_hash_alloc(sz);
+       if (!net->xfrm.state_byseq)
+               goto out_byseq;
        net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
 
        net->xfrm.state_num = 0;
@@ -2669,6 +2704,8 @@ int __net_init xfrm_state_init(struct net *net)
                               &net->xfrm.xfrm_state_lock);
        return 0;
 
+out_byseq:
+       xfrm_hash_free(net->xfrm.state_byspi, sz);
 out_byspi:
        xfrm_hash_free(net->xfrm.state_bysrc, sz);
 out_bysrc:
@@ -2688,6 +2725,8 @@ void xfrm_state_fini(struct net *net)
        WARN_ON(!list_empty(&net->xfrm.state_all));
 
        sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
+       WARN_ON(!hlist_empty(net->xfrm.state_byseq));
+       xfrm_hash_free(net->xfrm.state_byseq, sz);
        WARN_ON(!hlist_empty(net->xfrm.state_byspi));
        xfrm_hash_free(net->xfrm.state_byspi, sz);
        WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
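
[Editor's note] The state_byseq table added throughout this file turns __xfrm_find_acq_byseq() from a walk over every bydst bucket into a single hashed-bucket lookup. A minimal chained hash table shows the lookup-cost difference (plain pointers here stand in for the kernel's hlist machinery):

        #include <stdio.h>

        #define HBITS   4
        #define HSIZE   (1u << HBITS)
        #define HMASK   (HSIZE - 1)

        struct state {
                unsigned int seq;
                struct state *next;     /* bucket chain, like the byseq hlist_node */
        };

        static struct state *byseq[HSIZE];

        static unsigned int seq_hash(unsigned int seq)
        {
                return (seq ^ (seq >> 10) ^ (seq >> 20)) & HMASK;
        }

        static void insert(struct state *x)
        {
                unsigned int h = seq_hash(x->seq);

                x->next = byseq[h];
                byseq[h] = x;
        }

        /* O(chain length) in one bucket, instead of scanning every bucket
         * of an unrelated table as the old loop did */
        static struct state *find_by_seq(unsigned int seq)
        {
                struct state *x;

                for (x = byseq[seq_hash(seq)]; x; x = x->next)
                        if (x->seq == seq)
                                return x;
                return NULL;
        }

        int main(void)
        {
                struct state a = { .seq = 7 }, b = { .seq = 1000 };

                insert(&a);
                insert(&b);
                printf("found seq=1000: %s\n", find_by_seq(1000) ? "yes" : "no");
                return 0;
        }
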
index 21dbf63..9ec93d9 100644 (file)
@@ -117,22 +117,27 @@ static int mdpy_fb_probe(struct pci_dev *pdev,
        if (format != DRM_FORMAT_XRGB8888) {
                pci_err(pdev, "format mismatch (0x%x != 0x%x)\n",
                        format, DRM_FORMAT_XRGB8888);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_release_regions;
        }
        if (width < 100  || width > 10000) {
                pci_err(pdev, "width (%d) out of range\n", width);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_release_regions;
        }
        if (height < 100 || height > 10000) {
                pci_err(pdev, "height (%d) out of range\n", height);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_release_regions;
        }
        pci_info(pdev, "mdpy found: %dx%d framebuffer\n",
                 width, height);
 
        info = framebuffer_alloc(sizeof(struct mdpy_fb_par), &pdev->dev);
-       if (!info)
+       if (!info) {
+               ret = -ENOMEM;
                goto err_release_regions;
+       }
        pci_set_drvdata(pdev, info);
        par = info->par;
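
[Editor's note] The mdpy_fb hunks are a standard probe-path leak fix: once pci_request_regions() has succeeded, every later failure must unwind through the same label instead of returning directly, and the error code has to be carried in a variable so the `goto` path returns it. The idiom in isolation (the resource and names below are placeholders):

        #include <stdio.h>

        static int acquire(void)  { puts("acquire regions"); return 0; }
        static void release(void) { puts("release regions"); }

        static int probe(int fail_late)
        {
                int ret;

                ret = acquire();
                if (ret)
                        return ret;             /* nothing to undo yet */

                if (fail_late) {
                        ret = -22;              /* -EINVAL */
                        goto err_release;       /* undo in reverse order */
                }

                puts("probe ok");
                return 0;

        err_release:
                release();
                return ret;
        }

        int main(void)
        {
                return probe(1) ? 1 : 0;
        }
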
 
index f9b1952..1e9baa5 100644 (file)
@@ -192,15 +192,20 @@ static unsigned int get_symindex(Elf_Sym const *sym, Elf32_Word const *symtab,
                                 Elf32_Word const *symtab_shndx)
 {
        unsigned long offset;
+       unsigned short shndx = w2(sym->st_shndx);
        int index;
 
-       if (sym->st_shndx != SHN_XINDEX)
-               return w2(sym->st_shndx);
+       if (shndx > SHN_UNDEF && shndx < SHN_LORESERVE)
+               return shndx;
 
-       offset = (unsigned long)sym - (unsigned long)symtab;
-       index = offset / sizeof(*sym);
+       if (shndx == SHN_XINDEX) {
+               offset = (unsigned long)sym - (unsigned long)symtab;
+               index = offset / sizeof(*sym);
 
-       return w(symtab_shndx[index]);
+               return w(symtab_shndx[index]);
+       }
+
+       return 0;
 }
 
 static unsigned int get_shnum(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0)
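
[Editor's note] The recordmcount change above handles two ELF corner cases: st_shndx values in the reserved range SHN_LORESERVE..SHN_HIRESERVE are escape codes rather than real section indices, and SHN_XINDEX in particular means the true index lives in the separate SHT_SYMTAB_SHNDX array, indexed by symbol number. The resolution logic, sketched without the w()/w2() endianness helpers:

        #include <stdint.h>
        #include <stdio.h>

        #define SHN_UNDEF       0
        #define SHN_LORESERVE   0xff00
        #define SHN_XINDEX      0xffff

        /* resolve a symbol's section index, consulting the extended index
         * table only when st_shndx is the SHN_XINDEX escape value */
        static unsigned int sym_section(uint16_t st_shndx, unsigned long symnum,
                                        const uint32_t *symtab_shndx)
        {
                if (st_shndx > SHN_UNDEF && st_shndx < SHN_LORESERVE)
                        return st_shndx;                /* ordinary index */

                if (st_shndx == SHN_XINDEX && symtab_shndx)
                        return symtab_shndx[symnum];    /* extended index */

                return 0;       /* SHN_UNDEF or another reserved value */
        }

        int main(void)
        {
                uint32_t shndx_tab[] = { 0, 70000 };    /* object with >0xff00 sections */

                printf("%u\n", sym_section(12, 0, shndx_tab));
                printf("%u\n", sym_section(SHN_XINDEX, 1, shndx_tab));
                return 0;
        }
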
index 25f57c1..a90e31d 100644 (file)
@@ -17,6 +17,9 @@ MODULE_LICENSE("GPL");
 #define MAX_LED (((SNDRV_CTL_ELEM_ACCESS_MIC_LED - SNDRV_CTL_ELEM_ACCESS_SPK_LED) \
                        >> SNDRV_CTL_ELEM_ACCESS_LED_SHIFT) + 1)
 
+#define to_led_card_dev(_dev) \
+       container_of(_dev, struct snd_ctl_led_card, dev)
+
 enum snd_ctl_led_mode {
         MODE_FOLLOW_MUTE = 0,
         MODE_FOLLOW_ROUTE,
@@ -371,6 +374,21 @@ static void snd_ctl_led_disconnect(struct snd_card *card)
        snd_ctl_led_refresh();
 }
 
+static void snd_ctl_led_card_release(struct device *dev)
+{
+       struct snd_ctl_led_card *led_card = to_led_card_dev(dev);
+
+       kfree(led_card);
+}
+
+static void snd_ctl_led_release(struct device *dev)
+{
+}
+
+static void snd_ctl_led_dev_release(struct device *dev)
+{
+}
+
 /*
  * sysfs
  */
@@ -663,6 +681,7 @@ static void snd_ctl_led_sysfs_add(struct snd_card *card)
                led_card->number = card->number;
                led_card->led = led;
                device_initialize(&led_card->dev);
+               led_card->dev.release = snd_ctl_led_card_release;
                if (dev_set_name(&led_card->dev, "card%d", card->number) < 0)
                        goto cerr;
                led_card->dev.parent = &led->dev;
@@ -681,7 +700,6 @@ cerr:
                put_device(&led_card->dev);
 cerr2:
                printk(KERN_ERR "snd_ctl_led: unable to add card%d", card->number);
-               kfree(led_card);
        }
 }
 
@@ -700,8 +718,7 @@ static void snd_ctl_led_sysfs_remove(struct snd_card *card)
                snprintf(link_name, sizeof(link_name), "led-%s", led->name);
                sysfs_remove_link(&card->ctl_dev.kobj, link_name);
                sysfs_remove_link(&led_card->dev.kobj, "card");
-               device_del(&led_card->dev);
-               kfree(led_card);
+               device_unregister(&led_card->dev);
                led->cards[card->number] = NULL;
        }
 }
@@ -723,6 +740,7 @@ static int __init snd_ctl_led_init(void)
 
        device_initialize(&snd_ctl_led_dev);
        snd_ctl_led_dev.class = sound_class;
+       snd_ctl_led_dev.release = snd_ctl_led_dev_release;
        dev_set_name(&snd_ctl_led_dev, "ctl-led");
        if (device_add(&snd_ctl_led_dev)) {
                put_device(&snd_ctl_led_dev);
@@ -733,15 +751,16 @@ static int __init snd_ctl_led_init(void)
                INIT_LIST_HEAD(&led->controls);
                device_initialize(&led->dev);
                led->dev.parent = &snd_ctl_led_dev;
+               led->dev.release = snd_ctl_led_release;
                led->dev.groups = snd_ctl_led_dev_attr_groups;
                dev_set_name(&led->dev, led->name);
                if (device_add(&led->dev)) {
                        put_device(&led->dev);
                        for (; group > 0; group--) {
                                led = &snd_ctl_leds[group - 1];
-                               device_del(&led->dev);
+                               device_unregister(&led->dev);
                        }
-                       device_del(&snd_ctl_led_dev);
+                       device_unregister(&snd_ctl_led_dev);
                        return -ENOMEM;
                }
        }
@@ -767,9 +786,9 @@ static void __exit snd_ctl_led_exit(void)
        }
        for (group = 0; group < MAX_LED; group++) {
                led = &snd_ctl_leds[group];
-               device_del(&led->dev);
+               device_unregister(&led->dev);
        }
-       device_del(&snd_ctl_led_dev);
+       device_unregister(&snd_ctl_led_dev);
        snd_ctl_led_clean(NULL);
 }
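
[Editor's note] The ctl_led hunks all serve one rule of the driver model: a struct device is refcounted, must provide a .release callback that frees its container, and is torn down with device_unregister() (device_del() plus put_device()) rather than a bare kfree(), so the memory goes away only when the last reference drops. The ownership pattern, reduced to a refcount sketch (not the kobject implementation itself):

        #include <stdio.h>
        #include <stdlib.h>

        struct dev {
                int refs;
                void (*release)(struct dev *);
        };

        static void get_dev(struct dev *d) { d->refs++; }

        static void put_dev(struct dev *d)
        {
                if (--d->refs == 0)
                        d->release(d);  /* last reference: release frees the memory */
        }

        static void led_card_release(struct dev *d)
        {
                puts("release: free(led_card)");
                free(d);
        }

        int main(void)
        {
                struct dev *d = calloc(1, sizeof(*d));

                d->release = led_card_release;
                get_dev(d);     /* like device_initialize(): the registration ref */
                get_dev(d);     /* some other holder still using the device */
                put_dev(d);     /* like device_unregister(): drop the registration ref */
                put_dev(d);     /* last holder drops; release runs now, not before */
                return 0;
        }
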
 
index 1645e41..9863be6 100644 (file)
@@ -297,8 +297,16 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
                return err;
        }
        spin_lock_irq(&tmr->lock);
-       tmr->timeri = t;
+       if (tmr->timeri)
+               err = -EBUSY;
+       else
+               tmr->timeri = t;
        spin_unlock_irq(&tmr->lock);
+       if (err < 0) {
+               snd_timer_close(t);
+               snd_timer_instance_free(t);
+               return err;
+       }
        return 0;
 }
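
[Editor's note] The sequencer-timer fix above is a classic check-then-set race repair: test and claim the slot inside the same locked region (returning -EBUSY to the loser), and perform the expensive teardown only after dropping the lock. In pthread terms, with the snd_* objects replaced by stand-ins:

        #include <errno.h>
        #include <pthread.h>
        #include <stdio.h>

        static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        static void *slot;      /* tmr->timeri stand-in */

        static int open_timer(void *t)
        {
                int err = 0;

                pthread_mutex_lock(&lock);
                if (slot)
                        err = -EBUSY;   /* someone else won the race */
                else
                        slot = t;       /* claim while still holding the lock */
                pthread_mutex_unlock(&lock);

                if (err < 0) {
                        /* cleanup of 't' happens outside the lock */
                        printf("close and free instance: %d\n", err);
                        return err;
                }
                return 0;
        }

        int main(void)
        {
                int a, b;

                printf("%d %d\n", open_timer(&a), open_timer(&b));
                return 0;
        }
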
 
index 6898b1a..92b7008 100644 (file)
@@ -520,9 +520,10 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
                return;
        if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
                return;
+       event += 10; /* convert to SNDRV_TIMER_EVENT_MXXX */
        list_for_each_entry(ts, &ti->slave_active_head, active_list)
                if (ts->ccallback)
-                       ts->ccallback(ts, event + 100, &tstamp, resolution);
+                       ts->ccallback(ts, event, &tstamp, resolution);
 }
 
 /* start/continue a master timer */
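
[Editor's note] The one-line timer fix matters because the slave-notification events sit at a fixed offset from the base events, so the conversion must add exactly that offset; adding 100 instead of 10 produced event codes outside the valid range. A sketch of the offset convention (the numeric values below are illustrative assumptions, not the ALSA definitions):

        #include <stdio.h>

        enum ev  { EV_START = 0,  EV_STOP,  EV_CONTINUE,  EV_PAUSE };
        enum mev { EV_MSTART = 10, EV_MSTOP, EV_MCONTINUE, EV_MPAUSE };

        /* convert a base event to its master-timer counterpart by the
         * enums' fixed offset -- adding anything else (e.g. +100) lands
         * outside the defined range */
        static enum mev to_master(enum ev e)
        {
                return (enum mev)(e + (EV_MSTART - EV_START));
        }

        int main(void)
        {
                printf("EV_STOP -> %d (expect %d)\n", to_master(EV_STOP), EV_MSTOP);
                return 0;
        }
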
index e0faa66..5805c5d 100644 (file)
@@ -804,7 +804,7 @@ static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs,
 static inline void cancel_stream(struct amdtp_stream *s)
 {
        s->packet_index = -1;
-       if (current_work() == &s->period_work)
+       if (in_interrupt())
                amdtp_stream_pcm_abort(s);
        WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
 }
index ab5ff78..d8be146 100644 (file)
@@ -331,6 +331,10 @@ static const struct config_entry config_table[] = {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x51c8,
        },
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+               .device = 0x51cc,
+       },
 #endif
 
 };
index a31009a..5462f77 100644 (file)
@@ -2917,6 +2917,7 @@ static int hda_codec_runtime_resume(struct device *dev)
 #ifdef CONFIG_PM_SLEEP
 static int hda_codec_pm_prepare(struct device *dev)
 {
+       dev->power.power_state = PMSG_SUSPEND;
        return pm_runtime_suspended(dev);
 }
 
@@ -2924,6 +2925,10 @@ static void hda_codec_pm_complete(struct device *dev)
 {
        struct hda_codec *codec = dev_to_hda_codec(dev);
 
+       /* If no other pm-functions are called between prepare() and complete() */
+       if (dev->power.power_state.event == PM_EVENT_SUSPEND)
+               dev->power.power_state = PMSG_RESUME;
+
        if (pm_runtime_suspended(dev) && (codec->jackpoll_interval ||
            hda_codec_need_resume(codec) || codec->forced_resume))
                pm_request_resume(dev);
index b638fc2..1f8018f 100644 (file)
@@ -3520,6 +3520,7 @@ static int cap_sw_put(struct snd_kcontrol *kcontrol,
 static const struct snd_kcontrol_new cap_sw_temp = {
        .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
        .name = "Capture Switch",
+       .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
        .info = cap_sw_info,
        .get = cap_sw_get,
        .put = cap_sw_put,
index 79ade33..470753b 100644 (file)
@@ -2485,6 +2485,9 @@ static const struct pci_device_id azx_ids[] = {
        /* Alderlake-P */
        { PCI_DEVICE(0x8086, 0x51c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Alderlake-M */
+       { PCI_DEVICE(0x8086, 0x51cc),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Elkhart Lake */
        { PCI_DEVICE(0x8086, 0x4b55),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
index 726507d..8629e84 100644 (file)
@@ -2206,10 +2206,9 @@ static void cs8409_cs42l42_fixups(struct hda_codec *codec,
                break;
        case HDA_FIXUP_ACT_PROBE:
 
-               /* Set initial volume on Bullseye to -26 dB */
-               if (codec->fixup_id == CS8409_BULLSEYE)
-                       snd_hda_codec_amp_init_stereo(codec, CS8409_CS42L42_DMIC_ADC_PIN_NID,
-                                       HDA_INPUT, 0, 0xff, 0x19);
+               /* Set initial DMIC volume to -26 dB */
+               snd_hda_codec_amp_init_stereo(codec, CS8409_CS42L42_DMIC_ADC_PIN_NID,
+                               HDA_INPUT, 0, 0xff, 0x19);
                snd_hda_gen_add_kctl(&spec->gen,
                        NULL, &cs8409_cs42l42_hp_volume_mixer);
                snd_hda_gen_add_kctl(&spec->gen,
index 552e2cb..ab5113c 100644 (file)
@@ -2603,6 +2603,28 @@ static const struct hda_model_fixup alc882_fixup_models[] = {
        {}
 };
 
+static const struct snd_hda_pin_quirk alc882_pin_fixup_tbl[] = {
+       SND_HDA_PIN_QUIRK(0x10ec1220, 0x1043, "ASUS", ALC1220_FIXUP_CLEVO_P950,
+               {0x14, 0x01014010},
+               {0x15, 0x01011012},
+               {0x16, 0x01016011},
+               {0x18, 0x01a19040},
+               {0x19, 0x02a19050},
+               {0x1a, 0x0181304f},
+               {0x1b, 0x0221401f},
+               {0x1e, 0x01456130}),
+       SND_HDA_PIN_QUIRK(0x10ec1220, 0x1462, "MS-7C35", ALC1220_FIXUP_CLEVO_P950,
+               {0x14, 0x01015010},
+               {0x15, 0x01011012},
+               {0x16, 0x01011011},
+               {0x18, 0x01a11040},
+               {0x19, 0x02a19050},
+               {0x1a, 0x0181104f},
+               {0x1b, 0x0221401f},
+               {0x1e, 0x01451130}),
+       {}
+};
+
 /*
  * BIOS auto configuration
  */
@@ -2644,6 +2666,7 @@ static int patch_alc882(struct hda_codec *codec)
 
        snd_hda_pick_fixup(codec, alc882_fixup_models, alc882_fixup_tbl,
                       alc882_fixups);
+       snd_hda_pick_pin_fixup(codec, alc882_pin_fixup_tbl, alc882_fixups, true);
        snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
 
        alc_auto_parse_customize_define(codec);
@@ -6543,6 +6566,9 @@ enum {
        ALC295_FIXUP_ASUS_DACS,
        ALC295_FIXUP_HP_OMEN,
        ALC285_FIXUP_HP_SPECTRE_X360,
+       ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP,
+       ALC623_FIXUP_LENOVO_THINKSTATION_P340,
+       ALC255_FIXUP_ACER_HEADPHONE_AND_MIC,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -8109,6 +8135,27 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1,
        },
+       [ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc285_fixup_ideapad_s740_coef,
+               .chained = true,
+               .chain_id = ALC285_FIXUP_THINKPAD_HEADSET_JACK,
+       },
+       [ALC623_FIXUP_LENOVO_THINKSTATION_P340] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_no_shutup,
+               .chained = true,
+               .chain_id = ALC283_FIXUP_HEADSET_MIC,
+       },
+       [ALC255_FIXUP_ACER_HEADPHONE_AND_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x21, 0x03211030 }, /* Change the Headphone location to Left */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC255_FIXUP_XIAOMI_HEADSET_MIC
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8145,6 +8192,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
        SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
        SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
@@ -8266,12 +8314,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+       SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
        SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
        SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
+       SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+       SND_PCI_QUIRK(0x103c, 0x8720, "HP EliteBook x360 1040 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
@@ -8290,7 +8341,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
        SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+       SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+       SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+       SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
@@ -8427,7 +8484,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0xc019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0xc022, "Clevo NH77[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
-       SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
        SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
@@ -8477,6 +8534,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
        SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
+       SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
        SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -8692,6 +8750,9 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC245_FIXUP_HP_X360_AMP, .name = "alc245-hp-x360-amp"},
        {.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"},
        {.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
+       {.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
+       {.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
+       {.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
        {}
 };
 #define ALC225_STANDARD_PINS \
index f22bb2b..8148b0d 100644 (file)
@@ -235,10 +235,6 @@ static int acp3x_dma_open(struct snd_soc_component *component,
                return ret;
        }
 
-       if (!adata->play_stream && !adata->capture_stream &&
-           !adata->i2ssp_play_stream && !adata->i2ssp_capture_stream)
-               rv_writel(1, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
-
        i2s_data->acp3x_base = adata->acp3x_base;
        runtime->private_data = i2s_data;
        return ret;
@@ -365,12 +361,6 @@ static int acp3x_dma_close(struct snd_soc_component *component,
                }
        }
 
-       /* Disable ACP irq, when the current stream is being closed and
-        * another stream is also not active.
-        */
-       if (!adata->play_stream && !adata->capture_stream &&
-               !adata->i2ssp_play_stream && !adata->i2ssp_capture_stream)
-               rv_writel(0, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
        return 0;
 }
 
index 03fe939..c3f0c8b 100644 (file)
@@ -77,6 +77,7 @@
 #define ACP_POWER_OFF_IN_PROGRESS      0x03
 
 #define ACP3x_ITER_IRER_SAMP_LEN_MASK  0x38
+#define ACP_EXT_INTR_STAT_CLEAR_MASK 0xFFFFFFFF
 
 struct acp3x_platform_info {
        u16 play_i2s_instance;
index d3536fd..a013a60 100644 (file)
@@ -76,6 +76,19 @@ static int acp3x_reset(void __iomem *acp3x_base)
        return -ETIMEDOUT;
 }
 
+static void acp3x_enable_interrupts(void __iomem *acp_base)
+{
+       rv_writel(0x01, acp_base + mmACP_EXTERNAL_INTR_ENB);
+}
+
+static void acp3x_disable_interrupts(void __iomem *acp_base)
+{
+       rv_writel(ACP_EXT_INTR_STAT_CLEAR_MASK, acp_base +
+                 mmACP_EXTERNAL_INTR_STAT);
+       rv_writel(0x00, acp_base + mmACP_EXTERNAL_INTR_CNTL);
+       rv_writel(0x00, acp_base + mmACP_EXTERNAL_INTR_ENB);
+}
+
 static int acp3x_init(struct acp3x_dev_data *adata)
 {
        void __iomem *acp3x_base = adata->acp3x_base;
@@ -93,6 +106,7 @@ static int acp3x_init(struct acp3x_dev_data *adata)
                pr_err("ACP3x reset failed\n");
                return ret;
        }
+       acp3x_enable_interrupts(acp3x_base);
        return 0;
 }
 
@@ -100,6 +114,7 @@ static int acp3x_deinit(void __iomem *acp3x_base)
 {
        int ret;
 
+       acp3x_disable_interrupts(acp3x_base);
        /* Reset */
        ret = acp3x_reset(acp3x_base);
        if (ret) {
index 34aed80..37d4600 100644 (file)
@@ -307,7 +307,7 @@ static struct snd_soc_dai_driver ak5558_dai = {
 };
 
 static struct snd_soc_dai_driver ak5552_dai = {
-       .name = "ak5558-aif",
+       .name = "ak5552-aif",
        .capture = {
                .stream_name = "Capture",
                .channels_min = 1,
index f406723..88e79b9 100644 (file)
@@ -261,6 +261,9 @@ static const struct regmap_config cs35l32_regmap = {
        .readable_reg = cs35l32_readable_register,
        .precious_reg = cs35l32_precious_register,
        .cache_type = REGCACHE_RBTREE,
+
+       .use_single_read = true,
+       .use_single_write = true,
 };
 
 static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
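
[Editor's note] This and the following Cirrus codec patches all add use_single_read/use_single_write to the regmap config, which makes the regmap core split bulk transfers into per-register ones for parts whose bus interface does not auto-increment addresses. What the core effectively does with the flag set, as a simplified sketch (not the regmap implementation):

        #include <stdint.h>
        #include <stdio.h>

        static int reg_write(uint8_t reg, uint8_t val)
        {
                printf("i2c write: reg 0x%02x <- 0x%02x\n", reg, val);
                return 0;
        }

        /* split one bulk write into N single-register writes, for devices
         * that cannot take a multi-byte burst at an auto-incrementing address */
        static int bulk_write_single(uint8_t first_reg, const uint8_t *vals, int n)
        {
                int i, ret;

                for (i = 0; i < n; i++) {
                        ret = reg_write(first_reg + i, vals[i]);
                        if (ret)
                                return ret;
                }
                return 0;
        }

        int main(void)
        {
                const uint8_t v[3] = { 0x10, 0x20, 0x30 };

                return bulk_write_single(0x40, v, 3);
        }
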
index 7ad7b73..e8f3dcf 100644 (file)
@@ -1201,6 +1201,7 @@ static int cs35l33_i2c_probe(struct i2c_client *i2c_client,
                dev_err(&i2c_client->dev,
                        "CS35L33 Device ID (%X). Expected ID %X\n",
                        devid, CS35L33_CHIP_ID);
+               ret = -EINVAL;
                goto err_enable;
        }
 
index 110ee2d..3d3c3c3 100644 (file)
@@ -800,6 +800,9 @@ static struct regmap_config cs35l34_regmap = {
        .readable_reg = cs35l34_readable_register,
        .precious_reg = cs35l34_precious_register,
        .cache_type = REGCACHE_RBTREE,
+
+       .use_single_read = true,
+       .use_single_write = true,
 };
 
 static int cs35l34_handle_of_data(struct i2c_client *i2c_client,
index bf982e1..77473c2 100644 (file)
@@ -399,6 +399,9 @@ static const struct regmap_config cs42l42_regmap = {
        .reg_defaults = cs42l42_reg_defaults,
        .num_reg_defaults = ARRAY_SIZE(cs42l42_reg_defaults),
        .cache_type = REGCACHE_RBTREE,
+
+       .use_single_read = true,
+       .use_single_write = true,
 };
 
 static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false);
index c44a5cd..7cdffdf 100644 (file)
@@ -1175,7 +1175,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
        struct cs42l56_platform_data *pdata =
                dev_get_platdata(&i2c_client->dev);
        int ret, i;
-       unsigned int devid = 0;
+       unsigned int devid;
        unsigned int alpha_rev, metal_rev;
        unsigned int reg;
 
@@ -1245,6 +1245,11 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
        }
 
        ret = regmap_read(cs42l56->regmap, CS42L56_CHIP_ID_1, &reg);
+       if (ret) {
+               dev_err(&i2c_client->dev, "Failed to read chip ID: %d\n", ret);
+               return ret;
+       }
+
        devid = reg & CS42L56_CHIP_ID_MASK;
        if (devid != CS42L56_DEVID) {
                dev_err(&i2c_client->dev,
index c3f974e..e92baca 100644 (file)
@@ -1268,6 +1268,9 @@ static const struct regmap_config cs42l73_regmap = {
        .volatile_reg = cs42l73_volatile_register,
        .readable_reg = cs42l73_readable_register,
        .cache_type = REGCACHE_RBTREE,
+
+       .use_single_read = true,
+       .use_single_write = true,
 };
 
 static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
index 3d67cbf..abe0cc0 100644 (file)
@@ -912,6 +912,9 @@ static struct regmap_config cs53l30_regmap = {
        .writeable_reg = cs53l30_writeable_register,
        .readable_reg = cs53l30_readable_register,
        .cache_type = REGCACHE_RBTREE,
+
+       .use_single_read = true,
+       .use_single_write = true,
 };
 
 static int cs53l30_i2c_probe(struct i2c_client *client,
index bd3c523..13009d0 100644 (file)
@@ -2181,10 +2181,7 @@ static int da7219_register_dai_clks(struct snd_soc_component *component)
                                 ret);
                        goto err;
                }
-
-               da7219->dai_clks[i] = devm_clk_hw_get_clk(dev, dai_clk_hw, NULL);
-               if (IS_ERR(da7219->dai_clks[i]))
-                       return PTR_ERR(da7219->dai_clks[i]);
+               da7219->dai_clks[i] = dai_clk_hw->clk;
 
                /* For DT setup onecell data, otherwise create lookup */
                if (np) {
index b0ebfc8..171ab7f 100644 (file)
@@ -3579,6 +3579,7 @@ static const struct of_device_id rx_macro_dt_match[] = {
        { .compatible = "qcom,sm8250-lpass-rx-macro" },
        { }
 };
+MODULE_DEVICE_TABLE(of, rx_macro_dt_match);
 
 static struct platform_driver rx_macro_driver = {
        .driver = {
index acd2fbc..27a0d5d 100644 (file)
@@ -1846,6 +1846,7 @@ static const struct of_device_id tx_macro_dt_match[] = {
        { .compatible = "qcom,sm8250-lpass-tx-macro" },
        { }
 };
+MODULE_DEVICE_TABLE(of, tx_macro_dt_match);
 static struct platform_driver tx_macro_driver = {
        .driver = {
                .name = "tx_macro",
index 4be24e7..f8e49e4 100644 (file)
@@ -41,6 +41,7 @@ struct max98088_priv {
        enum max98088_type devtype;
        struct max98088_pdata *pdata;
        struct clk *mclk;
+       unsigned char mclk_prescaler;
        unsigned int sysclk;
        struct max98088_cdata dai[2];
        int eq_textcnt;
@@ -998,13 +999,16 @@ static int max98088_dai1_hw_params(struct snd_pcm_substream *substream,
        /* Configure NI when operating as master */
        if (snd_soc_component_read(component, M98088_REG_14_DAI1_FORMAT)
                & M98088_DAI_MAS) {
+               unsigned long pclk;
+
                if (max98088->sysclk == 0) {
                        dev_err(component->dev, "Invalid system clock frequency\n");
                        return -EINVAL;
                }
                ni = 65536ULL * (rate < 50000 ? 96ULL : 48ULL)
                                * (unsigned long long int)rate;
-               do_div(ni, (unsigned long long int)max98088->sysclk);
+               pclk = DIV_ROUND_CLOSEST(max98088->sysclk, max98088->mclk_prescaler);
+               ni = DIV_ROUND_CLOSEST_ULL(ni, pclk);
                snd_soc_component_write(component, M98088_REG_12_DAI1_CLKCFG_HI,
                        (ni >> 8) & 0x7F);
                snd_soc_component_write(component, M98088_REG_13_DAI1_CLKCFG_LO,
@@ -1065,13 +1069,16 @@ static int max98088_dai2_hw_params(struct snd_pcm_substream *substream,
        /* Configure NI when operating as master */
        if (snd_soc_component_read(component, M98088_REG_1C_DAI2_FORMAT)
                & M98088_DAI_MAS) {
+               unsigned long pclk;
+
                if (max98088->sysclk == 0) {
                        dev_err(component->dev, "Invalid system clock frequency\n");
                        return -EINVAL;
                }
                ni = 65536ULL * (rate < 50000 ? 96ULL : 48ULL)
                                * (unsigned long long int)rate;
-               do_div(ni, (unsigned long long int)max98088->sysclk);
+               pclk = DIV_ROUND_CLOSEST(max98088->sysclk, max98088->mclk_prescaler);
+               ni = DIV_ROUND_CLOSEST_ULL(ni, pclk);
                snd_soc_component_write(component, M98088_REG_1A_DAI2_CLKCFG_HI,
                        (ni >> 8) & 0x7F);
                snd_soc_component_write(component, M98088_REG_1B_DAI2_CLKCFG_LO,
@@ -1113,8 +1120,10 @@ static int max98088_dai_set_sysclk(struct snd_soc_dai *dai,
         */
        if ((freq >= 10000000) && (freq < 20000000)) {
                snd_soc_component_write(component, M98088_REG_10_SYS_CLK, 0x10);
+               max98088->mclk_prescaler = 1;
        } else if ((freq >= 20000000) && (freq < 30000000)) {
                snd_soc_component_write(component, M98088_REG_10_SYS_CLK, 0x20);
+               max98088->mclk_prescaler = 2;
        } else {
                dev_err(component->dev, "Invalid master clock frequency\n");
                return -EINVAL;
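
[Editor's note] The max98088 change corrects the NI divider arithmetic: per the hunks above, the part derives its internal clock by dividing MCLK by a prescaler (1 for the 10-20 MHz band, 2 for 20-30 MHz), so NI must be computed against that prescaled clock and rounded to nearest rather than truncated. The arithmetic on its own:

        #include <stdint.h>
        #include <stdio.h>

        #define DIV_ROUND_CLOSEST(x, d)  (((x) + (d) / 2) / (d))

        static unsigned int calc_ni(unsigned long sysclk, unsigned int prescaler,
                                    unsigned int rate)
        {
                unsigned long pclk = DIV_ROUND_CLOSEST(sysclk, (unsigned long)prescaler);
                uint64_t ni = 65536ULL * (rate < 50000 ? 96ULL : 48ULL) * rate;

                return (unsigned int)((ni + pclk / 2) / pclk);  /* round to nearest */
        }

        int main(void)
        {
                /* 24 MHz MCLK falls in the 20-30 MHz band, so the prescaler is 2 */
                printf("NI = %u\n", calc_ni(24000000UL, 2, 48000));
                return 0;
        }
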
index 87f5709..4a50b16 100644 (file)
@@ -2433,13 +2433,18 @@ static int set_dmic_power(struct snd_soc_dapm_widget *w,
        return 0;
 }
 
-static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
+static const struct snd_soc_dapm_widget rt5659_particular_dapm_widgets[] = {
        SND_SOC_DAPM_SUPPLY("LDO2", RT5659_PWR_ANLG_3, RT5659_PWR_LDO2_BIT, 0,
                NULL, 0),
-       SND_SOC_DAPM_SUPPLY("PLL", RT5659_PWR_ANLG_3, RT5659_PWR_PLL_BIT, 0,
-               NULL, 0),
+       SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5659_PWR_ANLG_2, RT5659_PWR_MB1_BIT,
+               0, NULL, 0),
        SND_SOC_DAPM_SUPPLY("Mic Det Power", RT5659_PWR_VOL,
                RT5659_PWR_MIC_DET_BIT, 0, NULL, 0),
+};
+
+static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
+       SND_SOC_DAPM_SUPPLY("PLL", RT5659_PWR_ANLG_3, RT5659_PWR_PLL_BIT, 0,
+               NULL, 0),
        SND_SOC_DAPM_SUPPLY("Mono Vref", RT5659_PWR_ANLG_1,
                RT5659_PWR_VREF3_BIT, 0, NULL, 0),
 
@@ -2464,8 +2469,6 @@ static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
                RT5659_ADC_MONO_R_ASRC_SFT, 0, NULL, 0),
 
        /* Input Side */
-       SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5659_PWR_ANLG_2, RT5659_PWR_MB1_BIT,
-               0, NULL, 0),
        SND_SOC_DAPM_SUPPLY("MICBIAS2", RT5659_PWR_ANLG_2, RT5659_PWR_MB2_BIT,
                0, NULL, 0),
        SND_SOC_DAPM_SUPPLY("MICBIAS3", RT5659_PWR_ANLG_2, RT5659_PWR_MB3_BIT,
@@ -3660,10 +3663,23 @@ static int rt5659_set_bias_level(struct snd_soc_component *component,
 
 static int rt5659_probe(struct snd_soc_component *component)
 {
+       struct snd_soc_dapm_context *dapm =
+               snd_soc_component_get_dapm(component);
        struct rt5659_priv *rt5659 = snd_soc_component_get_drvdata(component);
 
        rt5659->component = component;
 
+       switch (rt5659->pdata.jd_src) {
+       case RT5659_JD_HDA_HEADER:
+               break;
+
+       default:
+               snd_soc_dapm_new_controls(dapm,
+                       rt5659_particular_dapm_widgets,
+                       ARRAY_SIZE(rt5659_particular_dapm_widgets));
+               break;
+       }
+
        return 0;
 }
 
index fed80c8..e78ba3b 100644 (file)
@@ -462,7 +462,8 @@ static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
 
        regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_2,
                RT5682_EXT_JD_SRC, RT5682_EXT_JD_SRC_MANUAL);
-       regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd042);
+       regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd142);
+       regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_5, 0x0700, 0x0600);
        regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_3,
                RT5682_CBJ_IN_BUF_EN, RT5682_CBJ_IN_BUF_EN);
        regmap_update_bits(rt5682->regmap, RT5682_SAR_IL_CMD_1,
index cc36739..24a084e 100644 (file)
@@ -683,13 +683,13 @@ static int rt711_sdca_set_fu1e_capture_ctl(struct rt711_sdca_priv *rt711)
        ch_r = (rt711->fu1e_dapm_mute || rt711->fu1e_mixer_r_mute) ? 0x01 : 0x00;
 
        err = regmap_write(rt711->regmap,
-                       SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_USER_FU1E,
+                       SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT711_SDCA_ENT_USER_FU1E,
                        RT711_SDCA_CTL_FU_MUTE, CH_L), ch_l);
        if (err < 0)
                return err;
 
        err = regmap_write(rt711->regmap,
-                       SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_USER_FU1E,
+                       SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT711_SDCA_ENT_USER_FU1E,
                        RT711_SDCA_CTL_FU_MUTE, CH_R), ch_r);
        if (err < 0)
                return err;
index ffdf7e5..82a24e3 100644 (file)
@@ -408,6 +408,7 @@ static const struct of_device_id sti_sas_dev_match[] = {
        },
        {},
 };
+MODULE_DEVICE_TABLE(of, sti_sas_dev_match);
 
 static int sti_sas_driver_probe(struct platform_device *pdev)
 {
index 81866ae..55b2a1f 100644 (file)
 #define TAS2562_TDM_CFG0_RAMPRATE_MASK         BIT(5)
 #define TAS2562_TDM_CFG0_RAMPRATE_44_1         BIT(5)
 #define TAS2562_TDM_CFG0_SAMPRATE_MASK         GENMASK(3, 1)
-#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ    0x0
-#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ   0x1
-#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ  0x2
-#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ   0x3
-#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ   0x4
-#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ   0x5
-#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ 0x6
+#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ    (0x0 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ   (0x1 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ  (0x2 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ   (0x3 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ   (0x4 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ   (0x5 << 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ (0x6 << 1)
 
 #define TAS2562_TDM_CFG2_RIGHT_JUSTIFY BIT(6)
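[Editor's aside] Why the values gained a "<< 1": the sample-rate field occupies bits 3:1 (GENMASK(3, 1)), so an unshifted value such as 0x4 would land in bits 2:0 and be clipped by the mask write. A runnable toy showing the shifted value lining up with the mask:

#include <stdio.h>

#define GENMASK(h, l)  (((1U << ((h) - (l) + 1)) - 1) << (l))
#define SAMPRATE_MASK  GENMASK(3, 1)            /* bits 3:1 */

int main(void)
{
	unsigned int reg = 0xff;
	unsigned int rate_44_1_48k = 0x4 << 1;  /* pre-shifted into bits 3:1 */

	reg = (reg & ~SAMPRATE_MASK) | rate_44_1_48k;
	printf("field = 0x%x\n", (reg & SAMPRATE_MASK) >> 1);  /* prints 0x4 */
	return 0;
}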
 
index 0917d65..556c284 100644 (file)
@@ -119,6 +119,7 @@ config SND_SOC_FSL_RPMSG
        tristate "NXP Audio Base On RPMSG support"
        depends on COMMON_CLK
        depends on RPMSG
+       depends on SND_IMX_SOC || SND_IMX_SOC = n
        select SND_SOC_IMX_RPMSG if SND_IMX_SOC != n
        help
          Say Y if you want to add rpmsg audio support for the Freescale CPUs.
index c62bfd1..4f55b31 100644 (file)
@@ -744,6 +744,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
        /* Initialize sound card */
        priv->pdev = pdev;
        priv->card.dev = &pdev->dev;
+       priv->card.owner = THIS_MODULE;
        ret = snd_soc_of_parse_card_name(&priv->card, "model");
        if (ret) {
                snprintf(priv->name, sizeof(priv->name), "%s-audio",
index 2c8a2fc..5e71382 100644 (file)
@@ -209,7 +209,7 @@ static void graph_parse_mclk_fs(struct device_node *top,
 static int graph_parse_node(struct asoc_simple_priv *priv,
                            struct device_node *ep,
                            struct link_info *li,
-                           int is_cpu)
+                           int *cpu)
 {
        struct device *dev = simple_priv_to_dev(priv);
        struct device_node *top = dev->of_node;
@@ -217,9 +217,9 @@ static int graph_parse_node(struct asoc_simple_priv *priv,
        struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
        struct snd_soc_dai_link_component *dlc;
        struct asoc_simple_dai *dai;
-       int ret, single = 0;
+       int ret;
 
-       if (is_cpu) {
+       if (cpu) {
                dlc = asoc_link_to_cpu(dai_link, 0);
                dai = simple_props_to_dai_cpu(dai_props, 0);
        } else {
@@ -229,7 +229,7 @@ static int graph_parse_node(struct asoc_simple_priv *priv,
 
        graph_parse_mclk_fs(top, ep, dai_props);
 
-       ret = asoc_simple_parse_dai(ep, dlc, &single);
+       ret = asoc_simple_parse_dai(ep, dlc, cpu);
        if (ret < 0)
                return ret;
 
@@ -241,9 +241,6 @@ static int graph_parse_node(struct asoc_simple_priv *priv,
        if (ret < 0)
                return ret;
 
-       if (is_cpu)
-               asoc_simple_canonicalize_cpu(dlc, single);
-
        return 0;
 }
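[Editor's aside] The signature change folds two pieces of information into one argument: a non-NULL pointer selects the CPU leg and doubles as the out-parameter for single-link detection, while NULL means the codec leg. A standalone sketch of that pointer-as-flag pattern (names here are illustrative, not the driver's):

/* Non-NULL: CPU side, and *is_single_link receives the result.
 * NULL: codec side, nothing to report back. */
static int parse_leg(int *is_single_link)
{
	if (is_single_link)
		*is_single_link = 1;	/* e.g. result of asoc_simple_parse_dai() */
	return 0;
}

The canonicalize step moves out to the callers (see the later hunks), since only the CPU leg needs it and only after the link name has been formatted.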
 
@@ -276,33 +273,29 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
                                  struct link_info *li)
 {
        struct device *dev = simple_priv_to_dev(priv);
-       struct snd_soc_card *card = simple_priv_to_card(priv);
        struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
        struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
        struct device_node *top = dev->of_node;
        struct device_node *ep = li->cpu ? cpu_ep : codec_ep;
-       struct device_node *port;
-       struct device_node *ports;
-       struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
-       struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
        char dai_name[64];
        int ret;
 
-       port    = of_get_parent(ep);
-       ports   = of_get_parent(port);
-
        dev_dbg(dev, "link_of DPCM (%pOF)\n", ep);
 
        if (li->cpu) {
+               struct snd_soc_card *card = simple_priv_to_card(priv);
+               struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
+               int is_single_links = 0;
+
                /* Codec is dummy */
 
                /* FE settings */
                dai_link->dynamic               = 1;
                dai_link->dpcm_merged_format    = 1;
 
-               ret = graph_parse_node(priv, cpu_ep, li, 1);
+               ret = graph_parse_node(priv, cpu_ep, li, &is_single_links);
                if (ret)
-                       goto out_put_node;
+                       return ret;
 
                snprintf(dai_name, sizeof(dai_name),
                         "fe.%pOFP.%s", cpus->of_node, cpus->dai_name);
@@ -318,8 +311,13 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
                 */
                if (card->component_chaining && !soc_component_is_pcm(cpus))
                        dai_link->no_pcm = 1;
+
+               asoc_simple_canonicalize_cpu(cpus, is_single_links);
        } else {
-               struct snd_soc_codec_conf *cconf;
+               struct snd_soc_codec_conf *cconf = simple_props_to_codec_conf(dai_props, 0);
+               struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
+               struct device_node *port;
+               struct device_node *ports;
 
                /* CPU is dummy */
 
@@ -327,22 +325,25 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
                dai_link->no_pcm                = 1;
                dai_link->be_hw_params_fixup    = asoc_simple_be_hw_params_fixup;
 
-               cconf   = simple_props_to_codec_conf(dai_props, 0);
-
-               ret = graph_parse_node(priv, codec_ep, li, 0);
+               ret = graph_parse_node(priv, codec_ep, li, NULL);
                if (ret < 0)
-                       goto out_put_node;
+                       return ret;
 
                snprintf(dai_name, sizeof(dai_name),
                         "be.%pOFP.%s", codecs->of_node, codecs->dai_name);
 
                /* check "prefix" from top node */
+               port = of_get_parent(ep);
+               ports = of_get_parent(port);
                snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node,
                                              "prefix");
                if (of_node_name_eq(ports, "ports"))
                        snd_soc_of_parse_node_prefix(ports, cconf, codecs->of_node, "prefix");
                snd_soc_of_parse_node_prefix(port, cconf, codecs->of_node,
                                             "prefix");
+
+               of_node_put(ports);
+               of_node_put(port);
        }
 
        graph_parse_convert(dev, ep, &dai_props->adata);
@@ -351,11 +352,8 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
 
        ret = graph_link_init(priv, cpu_ep, codec_ep, li, dai_name);
 
-out_put_node:
        li->link++;
 
-       of_node_put(ports);
-       of_node_put(port);
        return ret;
 }
 
@@ -369,20 +367,23 @@ static int graph_dai_link_of(struct asoc_simple_priv *priv,
        struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
        struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
        char dai_name[64];
-       int ret;
+       int ret, is_single_links = 0;
 
        dev_dbg(dev, "link_of (%pOF)\n", cpu_ep);
 
-       ret = graph_parse_node(priv, cpu_ep, li, 1);
+       ret = graph_parse_node(priv, cpu_ep, li, &is_single_links);
        if (ret < 0)
                return ret;
 
-       ret = graph_parse_node(priv, codec_ep, li, 0);
+       ret = graph_parse_node(priv, codec_ep, li, NULL);
        if (ret < 0)
                return ret;
 
        snprintf(dai_name, sizeof(dai_name),
                 "%s-%s", cpus->dai_name, codecs->dai_name);
+
+       asoc_simple_canonicalize_cpu(cpus, is_single_links);
+
        ret = graph_link_init(priv, cpu_ep, codec_ep, li, dai_name);
        if (ret < 0)
                return ret;
index a1373be..0015f53 100644 (file)
@@ -93,12 +93,11 @@ static void simple_parse_convert(struct device *dev,
 }
 
 static void simple_parse_mclk_fs(struct device_node *top,
-                                struct device_node *cpu,
-                                struct device_node *codec,
+                                struct device_node *np,
                                 struct simple_dai_props *props,
                                 char *prefix)
 {
-       struct device_node *node = of_get_parent(cpu);
+       struct device_node *node = of_get_parent(np);
        char prop[128];
 
        snprintf(prop, sizeof(prop), "%smclk-fs", PREFIX);
@@ -106,12 +105,71 @@ static void simple_parse_mclk_fs(struct device_node *top,
 
        snprintf(prop, sizeof(prop), "%smclk-fs", prefix);
        of_property_read_u32(node,      prop, &props->mclk_fs);
-       of_property_read_u32(cpu,       prop, &props->mclk_fs);
-       of_property_read_u32(codec,     prop, &props->mclk_fs);
+       of_property_read_u32(np,        prop, &props->mclk_fs);
 
        of_node_put(node);
 }
 
+static int simple_parse_node(struct asoc_simple_priv *priv,
+                            struct device_node *np,
+                            struct link_info *li,
+                            char *prefix,
+                            int *cpu)
+{
+       struct device *dev = simple_priv_to_dev(priv);
+       struct device_node *top = dev->of_node;
+       struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
+       struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
+       struct snd_soc_dai_link_component *dlc;
+       struct asoc_simple_dai *dai;
+       int ret;
+
+       if (cpu) {
+               dlc = asoc_link_to_cpu(dai_link, 0);
+               dai = simple_props_to_dai_cpu(dai_props, 0);
+       } else {
+               dlc = asoc_link_to_codec(dai_link, 0);
+               dai = simple_props_to_dai_codec(dai_props, 0);
+       }
+
+       simple_parse_mclk_fs(top, np, dai_props, prefix);
+
+       ret = asoc_simple_parse_dai(np, dlc, cpu);
+       if (ret)
+               return ret;
+
+       ret = asoc_simple_parse_clk(dev, np, dai, dlc);
+       if (ret)
+               return ret;
+
+       ret = asoc_simple_parse_tdm(np, dai);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int simple_link_init(struct asoc_simple_priv *priv,
+                           struct device_node *node,
+                           struct device_node *codec,
+                           struct link_info *li,
+                           char *prefix, char *name)
+{
+       struct device *dev = simple_priv_to_dev(priv);
+       struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
+       int ret;
+
+       ret = asoc_simple_parse_daifmt(dev, node, codec,
+                                      prefix, &dai_link->dai_fmt);
+       if (ret < 0)
+               return ret;
+
+       dai_link->init                  = asoc_simple_dai_init;
+       dai_link->ops                   = &simple_ops;
+
+       return asoc_simple_set_dailink_name(dev, dai_link, name);
+}
+
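[Editor's aside] Taken together, the two new helpers are meant to compose like this (an illustrative shape with hypothetical node names cpu_np/codec_np; error handling elided, see the real call sites in the hunks below):

int single_cpu = 0;

ret = simple_parse_node(priv, cpu_np, li, prefix, &single_cpu);   /* CPU leg   */
ret = simple_parse_node(priv, codec_np, li, prefix, NULL);        /* codec leg */
ret = simple_link_init(priv, node, codec_np, li, prefix, dai_name);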
 static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
                                   struct device_node *np,
                                   struct device_node *codec,
@@ -121,24 +179,21 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
        struct device *dev = simple_priv_to_dev(priv);
        struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
        struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
-       struct asoc_simple_dai *dai;
-       struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
-       struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
-       struct snd_soc_dai_link_component *platforms = asoc_link_to_platform(dai_link, 0);
        struct device_node *top = dev->of_node;
        struct device_node *node = of_get_parent(np);
        char *prefix = "";
+       char dai_name[64];
        int ret;
 
        dev_dbg(dev, "link_of DPCM (%pOF)\n", np);
 
-       li->link++;
-
        /* For single DAI link & old style of DT node */
        if (is_top)
                prefix = PREFIX;
 
        if (li->cpu) {
+               struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
+               struct snd_soc_dai_link_component *platforms = asoc_link_to_platform(dai_link, 0);
                int is_single_links = 0;
 
                /* Codec is dummy */
@@ -147,25 +202,16 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
                dai_link->dynamic               = 1;
                dai_link->dpcm_merged_format    = 1;
 
-               dai = simple_props_to_dai_cpu(dai_props, 0);
-
-               ret = asoc_simple_parse_dai(np, cpus, &is_single_links);
-               if (ret)
-                       goto out_put_node;
-
-               ret = asoc_simple_parse_clk(dev, np, dai, cpus);
+               ret = simple_parse_node(priv, np, li, prefix, &is_single_links);
                if (ret < 0)
                        goto out_put_node;
 
-               ret = asoc_simple_set_dailink_name(dev, dai_link,
-                                                  "fe.%s",
-                                                  cpus->dai_name);
-               if (ret < 0)
-                       goto out_put_node;
+               snprintf(dai_name, sizeof(dai_name), "fe.%s", cpus->dai_name);
 
                asoc_simple_canonicalize_cpu(cpus, is_single_links);
                asoc_simple_canonicalize_platform(platforms, cpus);
        } else {
+               struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
                struct snd_soc_codec_conf *cconf;
 
                /* CPU is dummy */
@@ -174,22 +220,13 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
                dai_link->no_pcm                = 1;
                dai_link->be_hw_params_fixup    = asoc_simple_be_hw_params_fixup;
 
-               dai     = simple_props_to_dai_codec(dai_props, 0);
                cconf   = simple_props_to_codec_conf(dai_props, 0);
 
-               ret = asoc_simple_parse_dai(np, codecs, NULL);
+               ret = simple_parse_node(priv, np, li, prefix, NULL);
                if (ret < 0)
                        goto out_put_node;
 
-               ret = asoc_simple_parse_clk(dev, np, dai, codecs);
-               if (ret < 0)
-                       goto out_put_node;
-
-               ret = asoc_simple_set_dailink_name(dev, dai_link,
-                                                  "be.%s",
-                                                  codecs->dai_name);
-               if (ret < 0)
-                       goto out_put_node;
+               snprintf(dai_name, sizeof(dai_name), "be.%s", codecs->dai_name);
 
                /* check "prefix" from top node */
                snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node,
@@ -201,23 +238,14 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
        }
 
        simple_parse_convert(dev, np, &dai_props->adata);
-       simple_parse_mclk_fs(top, np, codec, dai_props, prefix);
-
-       ret = asoc_simple_parse_tdm(np, dai);
-       if (ret)
-               goto out_put_node;
-
-       ret = asoc_simple_parse_daifmt(dev, node, codec,
-                                      prefix, &dai_link->dai_fmt);
-       if (ret < 0)
-               goto out_put_node;
 
        snd_soc_dai_link_set_capabilities(dai_link);
 
-       dai_link->ops                   = &simple_ops;
-       dai_link->init                  = asoc_simple_dai_init;
+       ret = simple_link_init(priv, node, codec, li, prefix, dai_name);
 
 out_put_node:
+       li->link++;
+
        of_node_put(node);
        return ret;
 }
@@ -230,23 +258,19 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv,
 {
        struct device *dev = simple_priv_to_dev(priv);
        struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
-       struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
-       struct asoc_simple_dai *cpu_dai = simple_props_to_dai_cpu(dai_props, 0);
-       struct asoc_simple_dai *codec_dai = simple_props_to_dai_codec(dai_props, 0);
        struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0);
        struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0);
        struct snd_soc_dai_link_component *platforms = asoc_link_to_platform(dai_link, 0);
-       struct device_node *top = dev->of_node;
        struct device_node *cpu = NULL;
        struct device_node *node = NULL;
        struct device_node *plat = NULL;
+       char dai_name[64];
        char prop[128];
        char *prefix = "";
        int ret, single_cpu = 0;
 
        cpu  = np;
        node = of_get_parent(np);
-       li->link++;
 
        dev_dbg(dev, "link_of (%pOF)\n", node);
 
@@ -257,18 +281,11 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv,
        snprintf(prop, sizeof(prop), "%splat", prefix);
        plat = of_get_child_by_name(node, prop);
 
-       ret = asoc_simple_parse_daifmt(dev, node, codec,
-                                      prefix, &dai_link->dai_fmt);
-       if (ret < 0)
-               goto dai_link_of_err;
-
-       simple_parse_mclk_fs(top, cpu, codec, dai_props, prefix);
-
-       ret = asoc_simple_parse_dai(cpu, cpus, &single_cpu);
+       ret = simple_parse_node(priv, cpu, li, prefix, &single_cpu);
        if (ret < 0)
                goto dai_link_of_err;
 
-       ret = asoc_simple_parse_dai(codec, codecs, NULL);
+       ret = simple_parse_node(priv, codec, li, prefix, NULL);
        if (ret < 0)
                goto dai_link_of_err;
 
@@ -276,39 +293,20 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv,
        if (ret < 0)
                goto dai_link_of_err;
 
-       ret = asoc_simple_parse_tdm(cpu, cpu_dai);
-       if (ret < 0)
-               goto dai_link_of_err;
-
-       ret = asoc_simple_parse_tdm(codec, codec_dai);
-       if (ret < 0)
-               goto dai_link_of_err;
-
-       ret = asoc_simple_parse_clk(dev, cpu, cpu_dai, cpus);
-       if (ret < 0)
-               goto dai_link_of_err;
-
-       ret = asoc_simple_parse_clk(dev, codec, codec_dai, codecs);
-       if (ret < 0)
-               goto dai_link_of_err;
-
-       ret = asoc_simple_set_dailink_name(dev, dai_link,
-                                          "%s-%s",
-                                          cpus->dai_name,
-                                          codecs->dai_name);
-       if (ret < 0)
-               goto dai_link_of_err;
-
-       dai_link->ops = &simple_ops;
-       dai_link->init = asoc_simple_dai_init;
+       snprintf(dai_name, sizeof(dai_name),
+                "%s-%s", cpus->dai_name, codecs->dai_name);
 
        asoc_simple_canonicalize_cpu(cpus, single_cpu);
        asoc_simple_canonicalize_platform(platforms, cpus);
 
+       ret = simple_link_init(priv, node, codec, li, prefix, dai_name);
+
 dai_link_of_err:
        of_node_put(plat);
        of_node_put(node);
 
+       li->link++;
+
        return ret;
 }
 
index df2f5d5..22dbd9d 100644 (file)
@@ -574,6 +574,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
                                        BYT_RT5640_SSP0_AIF1 |
                                        BYT_RT5640_MCLK_EN),
        },
+       {       /* Glavey TM800A550L */
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+                       DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+                       /* Above strings are too generic, also match on BIOS version */
+                       DMI_MATCH(DMI_BIOS_VERSION, "ZY-8-BI-PX4S70VTR400-X423B-005-D"),
+               },
+               .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+                                       BYT_RT5640_SSP0_AIF1 |
+                                       BYT_RT5640_MCLK_EN),
+       },
        {
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
@@ -652,6 +663,20 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
                                        BYT_RT5640_MONO_SPEAKER |
                                        BYT_RT5640_MCLK_EN),
        },
+       {       /* Lenovo Miix 3-830 */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 3-830"),
+               },
+               .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+                                       BYT_RT5640_JD_SRC_JD2_IN4N |
+                                       BYT_RT5640_OVCD_TH_2000UA |
+                                       BYT_RT5640_OVCD_SF_0P75 |
+                                       BYT_RT5640_MONO_SPEAKER |
+                                       BYT_RT5640_DIFF_MIC |
+                                       BYT_RT5640_SSP0_AIF1 |
+                                       BYT_RT5640_MCLK_EN),
+       },
        {       /* Linx Linx7 tablet */
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LINX"),
index c62d261..a6e95db 100644 (file)
@@ -93,8 +93,30 @@ static void lpass_cpu_daiops_shutdown(struct snd_pcm_substream *substream,
                struct snd_soc_dai *dai)
 {
        struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+       struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
+       unsigned int id = dai->driver->id;
 
        clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]);
+       /*
+        * Ensure LRCLK is disabled even during device node validation.
+        * This has no effect if LRCLK was already disabled in the
+        * lpass_cpu_daiops_trigger() suspend path.
+        */
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+               regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_DISABLE);
+       else
+               regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_DISABLE);
+
+       /*
+        * BCLK is only enabled at this point if lpass_cpu_daiops_prepare was
+        * called before lpass_cpu_daiops_shutdown. This clk_disable is paired
+        * with the clk_enable in lpass_cpu_daiops_prepare.
+        */
+       if (drvdata->mi2s_was_prepared[dai->driver->id]) {
+               drvdata->mi2s_was_prepared[dai->driver->id] = false;
+               clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
+       }
+
        clk_unprepare(drvdata->mi2s_bit_clk[dai->driver->id]);
 }
 
@@ -275,6 +297,18 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_RESUME:
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+               /*
+                * Ensure lpass BCLK/LRCLK is enabled during
+                * device resume as lpass_cpu_daiops_prepare() is not called
+                * after the device resumes. We don't check mi2s_was_prepared before
+                * enabling/disabling BCLK in trigger events because:
+                *  1. These trigger events are paired, so the BCLK
+                *     enable_count stays balanced.
+                *  2. The BCLK can be shared (e.g. headset and headset mic);
+                *     we need to increase the enable_count so that we don't
+                *     turn off the shared BCLK while other devices are still
+                *     using it.
+                */
                if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                        ret = regmap_fields_write(i2sctl->spken, id,
                                                 LPAIF_I2SCTL_SPKEN_ENABLE);
@@ -296,6 +330,10 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+               /*
+                * Ensure lpass BCLK/LRCLK is disabled during
+                * device suspend.
+                */
                if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                        ret = regmap_fields_write(i2sctl->spken, id,
                                                 LPAIF_I2SCTL_SPKEN_DISABLE);
@@ -315,12 +353,53 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
        return ret;
 }
 
+static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
+               struct snd_soc_dai *dai)
+{
+       struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+       struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
+       unsigned int id = dai->driver->id;
+       int ret;
+
+       /*
+        * Ensure the lpass BCLK/LRCLK enable bit is set before playback/
+        * capture data flow starts. This gives the codec some lead time
+        * before the data flow begins (e.g. to drop start-up pop noise
+        * before capture starts).
+        */
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+               ret = regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_ENABLE);
+       else
+               ret = regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_ENABLE);
+
+       if (ret) {
+               dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
+               return ret;
+       }
+
+       /*
+        * Check mi2s_was_prepared before enabling BCLK, as lpass_cpu_daiops_prepare
+        * can be called multiple times. This clk_enable is paired with the
+        * clk_disable in lpass_cpu_daiops_shutdown.
+        */
+       if (!drvdata->mi2s_was_prepared[dai->driver->id]) {
+               ret = clk_enable(drvdata->mi2s_bit_clk[id]);
+               if (ret) {
+                       dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
+                       return ret;
+               }
+               drvdata->mi2s_was_prepared[dai->driver->id] = true;
+       }
+       return 0;
+}
+
 const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
        .set_sysclk     = lpass_cpu_daiops_set_sysclk,
        .startup        = lpass_cpu_daiops_startup,
        .shutdown       = lpass_cpu_daiops_shutdown,
        .hw_params      = lpass_cpu_daiops_hw_params,
        .trigger        = lpass_cpu_daiops_trigger,
+       .prepare        = lpass_cpu_daiops_prepare,
 };
 EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_ops);
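[Editor's aside] The prepare/shutdown pairing above boils down to a clk_enable() that must run at most once per stream, guarded by a flag, because .prepare can be called repeatedly while .shutdown runs once. A self-contained sketch of that pattern (plain C, with a counter standing in for the clock framework):

#include <stdbool.h>

struct dai_state {
	bool was_prepared;
	int bclk_enable_count;	/* stands in for clk_enable/clk_disable */
};

static int dai_prepare(struct dai_state *s)
{
	if (!s->was_prepared) {		/* idempotent across repeated prepares */
		s->bclk_enable_count++;
		s->was_prepared = true;
	}
	return 0;
}

static void dai_shutdown(struct dai_state *s)
{
	if (s->was_prepared) {		/* only undo what prepare did */
		s->was_prepared = false;
		s->bclk_enable_count--;
	}
}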
 
@@ -835,18 +914,8 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
                if (dai_id == LPASS_DP_RX)
                        continue;
 
-               drvdata->mi2s_osr_clk[dai_id] = devm_clk_get(dev,
+               drvdata->mi2s_osr_clk[dai_id] = devm_clk_get_optional(dev,
                                             variant->dai_osr_clk_names[i]);
-               if (IS_ERR(drvdata->mi2s_osr_clk[dai_id])) {
-                       dev_warn(dev,
-                               "%s() error getting optional %s: %ld\n",
-                               __func__,
-                               variant->dai_osr_clk_names[i],
-                               PTR_ERR(drvdata->mi2s_osr_clk[dai_id]));
-
-                       drvdata->mi2s_osr_clk[dai_id] = NULL;
-               }
-
                drvdata->mi2s_bit_clk[dai_id] = devm_clk_get(dev,
                                                variant->dai_bit_clk_names[i]);
                if (IS_ERR(drvdata->mi2s_bit_clk[dai_id])) {
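[Editor's aside] The switch to devm_clk_get_optional() is what allows the warning-and-NULL fallback above to be deleted: the _optional variant already returns NULL (not an error pointer) when the clock simply is not described, and the clk API treats a NULL clk as a no-op. The resulting idiom, sketched (clock name "osr" is illustrative):

clk = devm_clk_get_optional(dev, "osr");
if (IS_ERR(clk))
	return PTR_ERR(clk);	/* real errors (e.g. -EPROBE_DEFER) still propagate */
/* clk == NULL is fine: clk_prepare_enable(NULL) succeeds and does nothing */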
index 83b2e08..7f72214 100644 (file)
@@ -67,6 +67,10 @@ struct lpass_data {
        /* MI2S SD lines to use for playback/capture */
        unsigned int mi2s_playback_sd_mode[LPASS_MAX_MI2S_PORTS];
        unsigned int mi2s_capture_sd_mode[LPASS_MAX_MI2S_PORTS];
+
+       /* Tracks whether the MI2S prepare dai_op has been called */
+       bool mi2s_was_prepared[LPASS_MAX_MI2S_PORTS];
+
        int hdmi_port_enable;
 
        /* low-power audio interface (LPAIF) registers */
index 1c0904a..a76974c 100644 (file)
@@ -2225,6 +2225,8 @@ static char *fmt_single_name(struct device *dev, int *id)
                return NULL;
 
        name = devm_kstrdup(dev, devname, GFP_KERNEL);
+       if (!name)
+               return NULL;
 
        /* are we a "%s.%d" name (platform and SPI components) */
        found = strstr(name, dev->driver->name);
index 73076d4..4893a56 100644 (file)
@@ -1901,7 +1901,7 @@ static void stream_caps_new_ver(struct snd_soc_tplg_stream_caps *dest,
  * @src: older version of pcm as a source
  * @pcm: latest version of pcm created from the source
  *
- * Support from vesion 4. User should free the returned pcm manually.
+ * Support from version 4. User should free the returned pcm manually.
  */
 static int pcm_new_ver(struct soc_tplg *tplg,
                       struct snd_soc_tplg_pcm *src,
@@ -2089,7 +2089,7 @@ static void set_link_hw_format(struct snd_soc_dai_link *link,
  * @src: old version of phyical link config as a source
  * @link: latest version of physical link config created from the source
  *
- * Support from vesion 4. User need free the returned link config manually.
+ * Support from version 4. User need free the returned link config manually.
  */
 static int link_new_ver(struct soc_tplg *tplg,
                        struct snd_soc_tplg_link_config *src,
@@ -2400,7 +2400,7 @@ static int soc_tplg_dai_elems_load(struct soc_tplg *tplg,
  * @src: old version of manifest as a source
  * @manifest: latest version of manifest created from the source
  *
- * Support from vesion 4. Users need free the returned manifest manually.
+ * Support from version 4. Users need free the returned manifest manually.
  */
 static int manifest_new_ver(struct soc_tplg *tplg,
                            struct snd_soc_tplg_manifest *src,
index 8d7bab4..c1f9f0f 100644 (file)
@@ -421,11 +421,16 @@ static int ssp_dai_hw_params(struct snd_pcm_substream *substream,
        struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
        struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, SOF_AUDIO_PCM_DRV_NAME);
        struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+       struct sof_ipc_fw_version *v = &sdev->fw_ready.version;
        struct sof_ipc_dai_config *config;
        struct snd_sof_dai *sof_dai;
        struct sof_ipc_reply reply;
        int ret;
 
+       /* DAI_CONFIG IPC during hw_params is not supported in older firmware */
+       if (v->abi_version < SOF_ABI_VER(3, 18, 0))
+               return 0;
+
        list_for_each_entry(sof_dai, &sdev->dai_list, list) {
                if (!sof_dai->cpu_dai_name || !sof_dai->dai_config)
                        continue;
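[Editor's aside] SOF encodes the firmware ABI as a single packed integer so a feature gate is one comparison. The shift layout below is an assumption for the sketch, not necessarily the real packing in the SOF ABI header:

/* Illustrative packing; the real SOF_ABI_VER lives in the SOF ABI header. */
#define MY_ABI_VER(maj, min, pat)  (((maj) << 24) | ((min) << 12) | (pat))

if (v->abi_version < MY_ABI_VER(3, 18, 0))
	return 0;	/* older firmware: skip DAI_CONFIG during hw_params */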
index fd26580..c83fb62 100644 (file)
@@ -256,6 +256,7 @@ suspend:
 
        /* reset FW state */
        sdev->fw_state = SOF_FW_BOOT_NOT_STARTED;
+       sdev->enabled_cores_mask = 0;
 
        return ret;
 }
index c156123..3aa1cf2 100644 (file)
@@ -484,10 +484,7 @@ static int stm32_sai_add_mclk_provider(struct stm32_sai_sub_data *sai)
                dev_err(dev, "mclk register returned %d\n", ret);
                return ret;
        }
-
-       sai->sai_mclk = devm_clk_hw_get_clk(dev, hw, NULL);
-       if (IS_ERR(sai->sai_mclk))
-               return PTR_ERR(sai->sai_mclk);
+       sai->sai_mclk = hw->clk;
 
        /* register mclk provider */
        return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
index e6ff317..2287f8c 100644 (file)
@@ -436,7 +436,7 @@ static bool check_valid_altsetting_v2v3(struct snd_usb_audio *chip, int iface,
        if (snd_BUG_ON(altsetting >= 64 - 8))
                return false;
 
-       err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC2_CS_CUR,
+       err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
                              USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
                              UAC2_AS_VAL_ALT_SETTINGS << 8,
                              iface, &raw_data, sizeof(raw_data));
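[Editor's aside] The pipe direction now matches the request's direction bit: this is an IN transfer (USB_DIR_IN, device to host), so it must use the receive control pipe. Newer kernels reject mismatched pipe/direction pairs at submit time, which is presumably what prompted this fix and the identical one in the Scarlett mixer below. The pairing rule, as a sketch:

pipe = usb_rcvctrlpipe(dev, 0);	/* for USB_DIR_IN  requests (device -> host) */
pipe = usb_sndctrlpipe(dev, 0);	/* for USB_DIR_OUT requests (host -> device) */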
index fda66b2..37ad775 100644 (file)
@@ -3060,7 +3060,7 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
        case USB_ID(0x1235, 0x8203): /* Focusrite Scarlett 6i6 2nd Gen */
        case USB_ID(0x1235, 0x8204): /* Focusrite Scarlett 18i8 2nd Gen */
        case USB_ID(0x1235, 0x8201): /* Focusrite Scarlett 18i20 2nd Gen */
-               err = snd_scarlett_gen2_controls_create(mixer);
+               err = snd_scarlett_gen2_init(mixer);
                break;
 
        case USB_ID(0x041e, 0x323b): /* Creative Sound Blaster E1 */
index 560c2ad..4caf379 100644 (file)
@@ -635,7 +635,7 @@ static int scarlett2_usb(
        /* send a second message to get the response */
 
        err = snd_usb_ctl_msg(mixer->chip->dev,
-                       usb_sndctrlpipe(mixer->chip->dev, 0),
+                       usb_rcvctrlpipe(mixer->chip->dev, 0),
                        SCARLETT2_USB_VENDOR_SPECIFIC_CMD_RESP,
                        USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
                        0,
@@ -1997,38 +1997,11 @@ static int scarlett2_mixer_status_create(struct usb_mixer_interface *mixer)
        return usb_submit_urb(mixer->urb, GFP_KERNEL);
 }
 
-/* Entry point */
-int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer)
+static int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer,
+                                            const struct scarlett2_device_info *info)
 {
-       const struct scarlett2_device_info *info;
        int err;
 
-       /* only use UAC_VERSION_2 */
-       if (!mixer->protocol)
-               return 0;
-
-       switch (mixer->chip->usb_id) {
-       case USB_ID(0x1235, 0x8203):
-               info = &s6i6_gen2_info;
-               break;
-       case USB_ID(0x1235, 0x8204):
-               info = &s18i8_gen2_info;
-               break;
-       case USB_ID(0x1235, 0x8201):
-               info = &s18i20_gen2_info;
-               break;
-       default: /* device not (yet) supported */
-               return -EINVAL;
-       }
-
-       if (!(mixer->chip->setup & SCARLETT2_ENABLE)) {
-               usb_audio_err(mixer->chip,
-                       "Focusrite Scarlett Gen 2 Mixer Driver disabled; "
-                       "use options snd_usb_audio device_setup=1 "
-                       "to enable and report any issues to g@b4.vu");
-               return 0;
-       }
-
        /* Initialise private data, routing, sequence number */
        err = scarlett2_init_private(mixer, info);
        if (err < 0)
@@ -2073,3 +2046,51 @@ int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer)
 
        return 0;
 }
+
+int snd_scarlett_gen2_init(struct usb_mixer_interface *mixer)
+{
+       struct snd_usb_audio *chip = mixer->chip;
+       const struct scarlett2_device_info *info;
+       int err;
+
+       /* only use UAC_VERSION_2 */
+       if (!mixer->protocol)
+               return 0;
+
+       switch (chip->usb_id) {
+       case USB_ID(0x1235, 0x8203):
+               info = &s6i6_gen2_info;
+               break;
+       case USB_ID(0x1235, 0x8204):
+               info = &s18i8_gen2_info;
+               break;
+       case USB_ID(0x1235, 0x8201):
+               info = &s18i20_gen2_info;
+               break;
+       default: /* device not (yet) supported */
+               return -EINVAL;
+       }
+
+       if (!(chip->setup & SCARLETT2_ENABLE)) {
+               usb_audio_info(chip,
+                       "Focusrite Scarlett Gen 2 Mixer Driver disabled; "
+                       "use options snd_usb_audio vid=0x%04x pid=0x%04x "
+                       "device_setup=1 to enable and report any issues "
+                       "to g@b4.vu",
+                       USB_ID_VENDOR(chip->usb_id),
+                       USB_ID_PRODUCT(chip->usb_id));
+               return 0;
+       }
+
+       usb_audio_info(chip,
+               "Focusrite Scarlett Gen 2 Mixer Driver enabled pid=0x%04x",
+               USB_ID_PRODUCT(chip->usb_id));
+
+       err = snd_scarlett_gen2_controls_create(mixer, info);
+       if (err < 0)
+               usb_audio_err(mixer->chip,
+                             "Error initialising Scarlett Mixer Driver: %d",
+                             err);
+
+       return err;
+}
index 52e1dad..668c6b0 100644 (file)
@@ -2,6 +2,6 @@
 #ifndef __USB_MIXER_SCARLETT_GEN2_H
 #define __USB_MIXER_SCARLETT_GEN2_H
 
-int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer);
+int snd_scarlett_gen2_init(struct usb_mixer_interface *mixer);
 
 #endif /* __USB_MIXER_SCARLETT_GEN2_H */
diff --git a/tools/arch/mips/include/uapi/asm/perf_regs.h b/tools/arch/mips/include/uapi/asm/perf_regs.h
new file mode 100644 (file)
index 0000000..d0f4ecd
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_MIPS_PERF_REGS_H
+#define _ASM_MIPS_PERF_REGS_H
+
+enum perf_event_mips_regs {
+       PERF_REG_MIPS_PC,
+       PERF_REG_MIPS_R1,
+       PERF_REG_MIPS_R2,
+       PERF_REG_MIPS_R3,
+       PERF_REG_MIPS_R4,
+       PERF_REG_MIPS_R5,
+       PERF_REG_MIPS_R6,
+       PERF_REG_MIPS_R7,
+       PERF_REG_MIPS_R8,
+       PERF_REG_MIPS_R9,
+       PERF_REG_MIPS_R10,
+       PERF_REG_MIPS_R11,
+       PERF_REG_MIPS_R12,
+       PERF_REG_MIPS_R13,
+       PERF_REG_MIPS_R14,
+       PERF_REG_MIPS_R15,
+       PERF_REG_MIPS_R16,
+       PERF_REG_MIPS_R17,
+       PERF_REG_MIPS_R18,
+       PERF_REG_MIPS_R19,
+       PERF_REG_MIPS_R20,
+       PERF_REG_MIPS_R21,
+       PERF_REG_MIPS_R22,
+       PERF_REG_MIPS_R23,
+       PERF_REG_MIPS_R24,
+       PERF_REG_MIPS_R25,
+       PERF_REG_MIPS_R26,
+       PERF_REG_MIPS_R27,
+       PERF_REG_MIPS_R28,
+       PERF_REG_MIPS_R29,
+       PERF_REG_MIPS_R30,
+       PERF_REG_MIPS_R31,
+       PERF_REG_MIPS_MAX = PERF_REG_MIPS_R31 + 1,
+};
+#endif /* _ASM_MIPS_PERF_REGS_H */
index b7dd944..8f28faf 100644 (file)
 # define DISABLE_PTI           (1 << (X86_FEATURE_PTI & 31))
 #endif
 
-#ifdef CONFIG_IOMMU_SUPPORT
-# define DISABLE_ENQCMD        0
-#else
-# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
-#endif
+/* Force disable because it's broken beyond repair */
+#define DISABLE_ENQCMD         (1 << (X86_FEATURE_ENQCMD & 31))
 
 #ifdef CONFIG_X86_SGX
 # define DISABLE_SGX   0
index 078cbd2..de7f30f 100644 (file)
@@ -4,4 +4,8 @@
 
 #include "../../../../include/linux/bootconfig.h"
 
+#ifndef fallthrough
+# define fallthrough
+#endif
+
 #endif
index 7362bef..6cd6080 100644 (file)
@@ -399,6 +399,7 @@ static int apply_xbc(const char *path, const char *xbc_path)
        }
        /* TODO: Ensure the @path is initramfs/initrd image */
        if (fstat(fd, &stat) < 0) {
+               ret = -errno;
                pr_err("Failed to get the size of %s\n", path);
                goto out;
        }
index 3fd9a7e..79d9c44 100644 (file)
@@ -8,6 +8,7 @@
  * Note: you must update KVM_API_VERSION if you change this interface.
  */
 
+#include <linux/const.h>
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <linux/ioctl.h>
@@ -1879,8 +1880,8 @@ struct kvm_hyperv_eventfd {
  * conversion after harvesting an entry.  Also, it must not skip any
  * dirty bits, so that dirty bits are always harvested in sequence.
  */
-#define KVM_DIRTY_GFN_F_DIRTY           BIT(0)
-#define KVM_DIRTY_GFN_F_RESET           BIT(1)
+#define KVM_DIRTY_GFN_F_DIRTY           _BITUL(0)
+#define KVM_DIRTY_GFN_F_RESET           _BITUL(1)
 #define KVM_DIRTY_GFN_F_MASK            0x3
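[Editor's aside] UAPI headers cannot use the kernel-internal BIT() macro, which is why these flags switch to _BITUL() from the exported <linux/const.h> (also newly included above). Minimal illustration:

#include <linux/const.h>	/* exported to userspace, unlike kernel BIT() */

#define MY_FLAG_DIRTY  _BITUL(0)	/* expands to (1UL << 0) */
#define MY_FLAG_RESET  _BITUL(1)	/* expands to (1UL << 1) */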
 
 /*
index 6061431..e9b619a 100644 (file)
@@ -1094,7 +1094,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
                        goto out_put_ctx;
                }
                if (xsk->fd == umem->fd)
-                       umem->rx_ring_setup_done = true;
+                       umem->tx_ring_setup_done = true;
        }
 
        err = xsk_get_mmap_offsets(xsk->fd, &off);
index 24295d3..523aa41 100644 (file)
@@ -747,6 +747,10 @@ int arch_rewrite_retpolines(struct objtool_file *file)
 
        list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
 
+               if (insn->type != INSN_JUMP_DYNAMIC &&
+                   insn->type != INSN_CALL_DYNAMIC)
+                       continue;
+
                if (!strcmp(insn->sec->name, ".text.__x86.indirect_thunk"))
                        continue;
 
index 743c2e9..41bca1d 100644 (file)
@@ -717,7 +717,7 @@ static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
 
 struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
 {
-       struct section *symtab;
+       struct section *symtab, *symtab_shndx;
        struct symbol *sym;
        Elf_Data *data;
        Elf_Scn *s;
@@ -769,6 +769,29 @@ struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
        symtab->len += data->d_size;
        symtab->changed = true;
 
+       symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
+       if (symtab_shndx) {
+               s = elf_getscn(elf->elf, symtab_shndx->idx);
+               if (!s) {
+                       WARN_ELF("elf_getscn");
+                       return NULL;
+               }
+
+               data = elf_newdata(s);
+               if (!data) {
+                       WARN_ELF("elf_newdata");
+                       return NULL;
+               }
+
+               data->d_buf = &sym->sym.st_size; /* conveniently 0 */
+               data->d_size = sizeof(Elf32_Word);
+               data->d_align = 4;
+               data->d_type = ELF_T_WORD;
+
+               symtab_shndx->len += 4;
+               symtab_shndx->changed = true;
+       }
+
        sym->sec = find_section_by_index(elf, 0);
 
        elf_add_symbol(elf, sym);
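[Editor's aside] Background for the hunk: when an ELF object has too many sections for a symbol's 16-bit st_shndx (SHN_XINDEX), the real section indices live in .symtab_shndx as one Elf32_Word per .symtab entry. Every symbol appended to .symtab therefore needs a parallel word (zero here, since the new symbol is undefined) to keep the two arrays in lockstep. The invariant, as a standalone check:

#include <assert.h>
#include <elf.h>
#include <stddef.h>

/* Sketch: .symtab and .symtab_shndx must stay parallel arrays. */
static void check_parallel(size_t symtab_bytes, size_t shndx_bytes)
{
	assert(symtab_bytes / sizeof(Elf64_Sym) ==
	       shndx_bytes / sizeof(Elf32_Word));
}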
index 406a951..73df23d 100644 (file)
@@ -90,7 +90,6 @@ endif
 ifeq ($(ARCH),mips)
   NO_PERF_REGS := 0
   CFLAGS += -I$(OUTPUT)arch/mips/include/generated
-  CFLAGS += -I../../arch/mips/include/uapi -I../../arch/mips/include/generated/uapi
   LIBUNWIND_LIBS = -lunwind -lunwind-mips
 endif
 
index 3337b5f..84803ab 100644 (file)
@@ -2714,6 +2714,12 @@ int cmd_record(int argc, const char **argv)
                rec->no_buildid = true;
        }
 
+       if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
+               pr_err("Kernel has no cgroup sampling support.\n");
+               err = -EINVAL;
+               goto out_opts;
+       }
+
        if (rec->opts.kcore)
                rec->data.is_dir = true;
 
index dd8ff28..c783558 100755 (executable)
@@ -39,6 +39,7 @@ arch/x86/lib/x86-opcode-map.txt
 arch/x86/tools/gen-insn-attr-x86.awk
 arch/arm/include/uapi/asm/perf_regs.h
 arch/arm64/include/uapi/asm/perf_regs.h
+arch/mips/include/uapi/asm/perf_regs.h
 arch/powerpc/include/uapi/asm/perf_regs.h
 arch/s390/include/uapi/asm/perf_regs.h
 arch/x86/include/uapi/asm/perf_regs.h
index 20cb91e..2f6b671 100644 (file)
@@ -443,6 +443,8 @@ int main(int argc, const char **argv)
        const char *cmd;
        char sbuf[STRERR_BUFSIZE];
 
+       perf_debug_setup();
+
        /* libsubcmd init */
        exec_cmd_init("perf", PREFIX, PERF_EXEC_PATH, EXEC_PATH_ENVIRONMENT);
        pager_init(PERF_PAGER_ENVIRONMENT);
@@ -531,8 +533,6 @@ int main(int argc, const char **argv)
         */
        pthread__block_sigwinch();
 
-       perf_debug_setup();
-
        while (1) {
                static int done_help;
 
index 616f290..605be14 100644 (file)
@@ -1,46 +1,56 @@
 [
   {
-    "EventCode": "1003C",
+    "EventCode": "0x1003C",
     "EventName": "PM_EXEC_STALL_DMISS_L2L3",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from either the local L2 or local L3."
   },
   {
-    "EventCode": "34056",
+    "EventCode": "0x1E054",
+    "EventName": "PM_EXEC_STALL_DMISS_L21_L31",
+    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from another core's L2 or L3 on the same chip."
+  },
+  {
+    "EventCode": "0x34054",
+    "EventName": "PM_EXEC_STALL_DMISS_L2L3_NOCONFLICT",
+    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, without a dispatch conflict."
+  },
+  {
+    "EventCode": "0x34056",
     "EventName": "PM_EXEC_STALL_LOAD_FINISH",
-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was finishing a load after its data was reloaded from a data source beyond the local L1; cycles in which the LSU was processing an L1-hit; cycles in which the NTF instruction merged with another load in the LMQ."
+    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was finishing a load after its data was reloaded from a data source beyond the local L1; cycles in which the LSU was processing an L1-hit; cycles in which the NTF instruction merged with another load in the LMQ; cycles in which the NTF instruction is waiting for a data reload for a load miss, but the data comes back with a non-NTF instruction."
   },
   {
-    "EventCode": "3006C",
+    "EventCode": "0x3006C",
     "EventName": "PM_RUN_CYC_SMT2_MODE",
     "BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT2 mode."
   },
   {
-    "EventCode": "300F4",
+    "EventCode": "0x300F4",
     "EventName": "PM_RUN_INST_CMPL_CONC",
     "BriefDescription": "PowerPC instructions completed by this thread when all threads in the core had the run-latch set."
   },
   {
-    "EventCode": "4C016",
+    "EventCode": "0x4C016",
     "EventName": "PM_EXEC_STALL_DMISS_L2L3_CONFLICT",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, with a dispatch conflict."
   },
   {
-    "EventCode": "4D014",
+    "EventCode": "0x4D014",
     "EventName": "PM_EXEC_STALL_LOAD",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a load instruction executing in the Load Store Unit."
   },
   {
-    "EventCode": "4D016",
+    "EventCode": "0x4D016",
     "EventName": "PM_EXEC_STALL_PTESYNC",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a PTESYNC instruction executing in the Load Store Unit."
   },
   {
-    "EventCode": "401EA",
+    "EventCode": "0x401EA",
     "EventName": "PM_THRESH_EXC_128",
     "BriefDescription": "Threshold counter exceeded a value of 128."
   },
   {
-    "EventCode": "400F6",
+    "EventCode": "0x400F6",
     "EventName": "PM_BR_MPRED_CMPL",
     "BriefDescription": "A mispredicted branch completed. Includes direction and target."
   }
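[Editor's aside] A note on the 0x prefixes: with an explicit 0x, a base-0 numeric parse reads the event codes as hexadecimal instead of guessing the radix, which is presumably why every EventCode in these files gained the prefix. A runnable illustration:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* base 0 honors the 0x prefix: "0x1003C" -> 65596 */
	printf("%llu\n", strtoull("0x1003C", NULL, 0));
	return 0;
}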
index 703cd43..54acb55 100644 (file)
@@ -1,6 +1,6 @@
 [
   {
-    "EventCode": "4016E",
+    "EventCode": "0x4016E",
     "EventName": "PM_THRESH_NOT_MET",
     "BriefDescription": "Threshold counter did not meet threshold."
   }
index eac8609..558f953 100644 (file)
 [
   {
-    "EventCode": "10004",
+    "EventCode": "0x10004",
     "EventName": "PM_EXEC_STALL_TRANSLATION",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss or ERAT miss and waited for it to resolve."
   },
   {
-    "EventCode": "10010",
+    "EventCode": "0x10006",
+    "EventName": "PM_DISP_STALL_HELD_OTHER_CYC",
+    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any other reason."
+  },
+  {
+    "EventCode": "0x10010",
     "EventName": "PM_PMC4_OVERFLOW",
     "BriefDescription": "The event selected for PMC4 caused the event counter to overflow."
   },
   {
-    "EventCode": "10020",
+    "EventCode": "0x10020",
     "EventName": "PM_PMC4_REWIND",
     "BriefDescription": "The speculative event selected for PMC4 rewinds and the counter for PMC4 is not charged."
   },
   {
-    "EventCode": "10038",
+    "EventCode": "0x10038",
     "EventName": "PM_DISP_STALL_TRANSLATION",
     "BriefDescription": "Cycles when dispatch was stalled for this thread because the MMU was handling a translation miss."
   },
   {
-    "EventCode": "1003A",
+    "EventCode": "0x1003A",
     "EventName": "PM_DISP_STALL_BR_MPRED_IC_L2",
     "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2 after suffering a branch mispredict."
   },
   {
-    "EventCode": "1E050",
+    "EventCode": "0x1D05E",
+    "EventName": "PM_DISP_STALL_HELD_HALT_CYC",
+    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of power management."
+  },
+  {
+    "EventCode": "0x1E050",
     "EventName": "PM_DISP_STALL_HELD_STF_MAPPER_CYC",
     "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the STF mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR."
   },
   {
-    "EventCode": "1F054",
+    "EventCode": "0x1F054",
     "EventName": "PM_DTLB_HIT",
     "BriefDescription": "The PTE required by the instruction was resident in the TLB (data TLB access). When MMCR1[16]=0 this event counts only demand hits. When MMCR1[16]=1 this event includes demand and prefetch. Applies to both HPT and RPT."
   },
   {
-    "EventCode": "101E8",
+    "EventCode": "0x10064",
+    "EventName": "PM_DISP_STALL_IC_L2",
+    "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2."
+  },
+  {
+    "EventCode": "0x101E8",
     "EventName": "PM_THRESH_EXC_256",
     "BriefDescription": "Threshold counter exceeded a count of 256."
   },
   {
-    "EventCode": "101EC",
+    "EventCode": "0x101EC",
     "EventName": "PM_THRESH_MET",
     "BriefDescription": "Threshold exceeded."
   },
   {
-    "EventCode": "100F2",
+    "EventCode": "0x100F2",
     "EventName": "PM_1PLUS_PPC_CMPL",
     "BriefDescription": "Cycles in which at least one instruction is completed by this thread."
   },
   {
-    "EventCode": "100F6",
+    "EventCode": "0x100F6",
     "EventName": "PM_IERAT_MISS",
     "BriefDescription": "IERAT Reloaded to satisfy an IERAT miss. All page sizes are counted by this event."
   },
   {
-    "EventCode": "100F8",
+    "EventCode": "0x100F8",
     "EventName": "PM_DISP_STALL_CYC",
     "BriefDescription": "Cycles the ICT has no itags assigned to this thread (no instructions were dispatched during these cycles)."
   },
   {
-    "EventCode": "20114",
+    "EventCode": "0x20006",
+    "EventName": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC",
+    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch due to Issue queue full. Includes issue queue and branch queue."
+  },
+  {
+    "EventCode": "0x20114",
     "EventName": "PM_MRK_L2_RC_DISP",
     "BriefDescription": "Marked instruction RC dispatched in L2."
   },
   {
-    "EventCode": "2C010",
+    "EventCode": "0x2C010",
     "EventName": "PM_EXEC_STALL_LSU",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the Load Store Unit. This does not include simple fixed point instructions."
   },
   {
-    "EventCode": "2C016",
+    "EventCode": "0x2C016",
     "EventName": "PM_DISP_STALL_IERAT_ONLY_MISS",
     "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction ERAT miss."
   },
   {
-    "EventCode": "2C01E",
+    "EventCode": "0x2C01E",
     "EventName": "PM_DISP_STALL_BR_MPRED_IC_L3",
     "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3 after suffering a branch mispredict."
   },
   {
-    "EventCode": "2D01A",
+    "EventCode": "0x2D01A",
     "EventName": "PM_DISP_STALL_IC_MISS",
     "BriefDescription": "Cycles when dispatch was stalled for this thread due to an Icache Miss."
   },
   {
-    "EventCode": "2D01C",
-    "EventName": "PM_CMPL_STALL_STCX",
-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a stcx waiting for resolution from the nest before completing."
-  },
-  {
-    "EventCode": "2E018",
+    "EventCode": "0x2E018",
     "EventName": "PM_DISP_STALL_FETCH",
     "BriefDescription": "Cycles when dispatch was stalled for this thread because Fetch was being held."
   },
   {
-    "EventCode": "2E01A",
+    "EventCode": "0x2E01A",
     "EventName": "PM_DISP_STALL_HELD_XVFC_MAPPER_CYC",
     "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the XVFC mapper/SRB was full."
   },
   {
-    "EventCode": "2C142",
+    "EventCode": "0x2C142",
     "EventName": "PM_MRK_XFER_FROM_SRC_PMC2",
     "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[15:27]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
   },
   {
-    "EventCode": "24050",
+    "EventCode": "0x24050",
     "EventName": "PM_IOPS_DISP",
     "BriefDescription": "Internal Operations dispatched. PM_IOPS_DISP / PM_INST_DISP will show the average number of internal operations per PowerPC instruction."
   },
   {
-    "EventCode": "2405E",
+    "EventCode": "0x2405E",
     "EventName": "PM_ISSUE_CANCEL",
     "BriefDescription": "An instruction issued and the issue was later cancelled. Only one cancel per PowerPC instruction."
   },
   {
-    "EventCode": "200FA",
+    "EventCode": "0x200FA",
     "EventName": "PM_BR_TAKEN_CMPL",
     "BriefDescription": "Branch Taken instruction completed."
   },
   {
-    "EventCode": "30012",
+    "EventCode": "0x30004",
+    "EventName": "PM_DISP_STALL_FLUSH",
+    "BriefDescription": "Cycles when dispatch was stalled because of a flush that happened to an instruction(s) that was not yet NTC. PM_EXEC_STALL_NTC_FLUSH only includes instructions that were flushed after becoming NTC."
+  },
+  {
+    "EventCode": "0x3000A",
+    "EventName": "PM_DISP_STALL_ITLB_MISS",
+    "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction TLB miss."
+  },
+  {
+    "EventCode": "0x30012",
     "EventName": "PM_FLUSH_COMPLETION",
     "BriefDescription": "The instruction that was next to complete (oldest in the pipeline) did not complete because it suffered a flush."
   },
   {
-    "EventCode": "30014",
+    "EventCode": "0x30014",
     "EventName": "PM_EXEC_STALL_STORE",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store instruction executing in the Load Store Unit."
   },
   {
-    "EventCode": "30018",
+    "EventCode": "0x30018",
     "EventName": "PM_DISP_STALL_HELD_SCOREBOARD_CYC",
     "BriefDescription": "Cycles in which the NTC instruction is held at dispatch while waiting on the Scoreboard. This event combines VSCR and FPSCR together."
   },
   {
-    "EventCode": "30026",
+    "EventCode": "0x30026",
     "EventName": "PM_EXEC_STALL_STORE_MISS",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store whose cache line was not resident in the L1 and was waiting for allocation of the missing line into the L1."
   },
   {
-    "EventCode": "3012A",
+    "EventCode": "0x3012A",
     "EventName": "PM_MRK_L2_RC_DONE",
     "BriefDescription": "L2 RC machine completed the transaction for the marked instruction."
   },
   {
-    "EventCode": "3F046",
+    "EventCode": "0x3F046",
     "EventName": "PM_ITLB_HIT_1G",
     "BriefDescription": "Instruction TLB hit (IERAT reload) page size 1G, which implies Radix Page Table translation is in use. When MMCR1[17]=0 this event counts only for demand misses. When MMCR1[17]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "34058",
+    "EventCode": "0x34058",
     "EventName": "PM_DISP_STALL_BR_MPRED_ICMISS",
     "BriefDescription": "Cycles when dispatch was stalled after a mispredicted branch resulted in an instruction cache miss."
   },
   {
-    "EventCode": "3D05C",
+    "EventCode": "0x3D05C",
     "EventName": "PM_DISP_STALL_HELD_RENAME_CYC",
     "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR and XVFC."
   },
   {
-    "EventCode": "3E052",
+    "EventCode": "0x3E052",
     "EventName": "PM_DISP_STALL_IC_L3",
     "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3."
   },
   {
-    "EventCode": "3E054",
+    "EventCode": "0x3E054",
     "EventName": "PM_LD_MISS_L1",
     "BriefDescription": "Load Missed L1, counted at execution time (can be greater than loads finished). LMQ merges are not included in this count. i.e. if a load instruction misses on an address that is already allocated on the LMQ, this event will not increment for that load). Note that this count is per slice, so if a load spans multiple slices this event will increment multiple times for a single load."
   },
   {
-    "EventCode": "301EA",
+    "EventCode": "0x301EA",
     "EventName": "PM_THRESH_EXC_1024",
     "BriefDescription": "Threshold counter exceeded a value of 1024."
   },
   {
-    "EventCode": "300FA",
+    "EventCode": "0x300FA",
     "EventName": "PM_INST_FROM_L3MISS",
     "BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
   },
   {
-    "EventCode": "40006",
+    "EventCode": "0x40006",
     "EventName": "PM_ISSUE_KILL",
     "BriefDescription": "Cycles in which an instruction or group of instructions were cancelled after being issued. This event increments once per occurrence, regardless of how many instructions are included in the issue group."
   },
   {
-    "EventCode": "40116",
+    "EventCode": "0x40116",
     "EventName": "PM_MRK_LARX_FIN",
     "BriefDescription": "Marked load and reserve instruction (LARX) finished. LARX and STCX are instructions used to acquire a lock."
   },
   {
-    "EventCode": "4C010",
+    "EventCode": "0x4C010",
     "EventName": "PM_DISP_STALL_BR_MPRED_IC_L3MISS",
     "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from sources beyond the local L3 after suffering a mispredicted branch."
   },
   {
-    "EventCode": "4D01E",
+    "EventCode": "0x4D01E",
     "EventName": "PM_DISP_STALL_BR_MPRED",
     "BriefDescription": "Cycles when dispatch was stalled for this thread due to a mispredicted branch."
   },
   {
-    "EventCode": "4E010",
+    "EventCode": "0x4E010",
     "EventName": "PM_DISP_STALL_IC_L3MISS",
     "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from any source beyond the local L3."
   },
   {
-    "EventCode": "4E01A",
+    "EventCode": "0x4E01A",
     "EventName": "PM_DISP_STALL_HELD_CYC",
     "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any reason."
   },
   {
-    "EventCode": "44056",
+    "EventCode": "0x4003C",
+    "EventName": "PM_DISP_STALL_HELD_SYNC_CYC",
+    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch."
+  },
+  {
+    "EventCode": "0x44056",
     "EventName": "PM_VECTOR_ST_CMPL",
     "BriefDescription": "Vector store instructions completed."
   }
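
The PM_IOPS_DISP entry above suggests a derived metric: PM_IOPS_DISP divided by PM_INST_DISP gives the average number of internal operations per PowerPC instruction. A minimal sketch of that arithmetic in C, with hypothetical counter readings (real values would come from perf stat):

#include <stdio.h>

int main(void)
{
        /* Hypothetical readings for the two events named above. */
        unsigned long long iops_disp = 1250000;  /* PM_IOPS_DISP */
        unsigned long long inst_disp = 1000000;  /* PM_INST_DISP */

        printf("internal ops per PowerPC instruction: %.2f\n",
               (double)iops_disp / (double)inst_disp);
        return 0;
}
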
index 016d8de..b5a0d65 100644 (file)
@@ -1,11 +1,11 @@
 [
   {
-    "EventCode": "1E058",
+    "EventCode": "0x1E058",
     "EventName": "PM_STCX_FAIL_FIN",
     "BriefDescription": "Conditional store instruction (STCX) failed. LARX and STCX are instructions used to acquire a lock."
   },
   {
-    "EventCode": "4E050",
+    "EventCode": "0x4E050",
     "EventName": "PM_STCX_PASS_FIN",
     "BriefDescription": "Conditional store instruction (STCX) passed. LARX and STCX are instructions used to acquire a lock."
   }
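
Across all of these JSON files the change is the same: every EventCode gains a 0x prefix. A plausible motivation (an assumption; the diff itself does not say) is that the codes are hexadecimal, and a base-0 string conversion only treats them as hex when the prefix is present:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* Without the prefix, base-0 parsing reads "2D01A" as decimal,
         * stops at the first non-digit, and yields 2. */
        printf("%llx\n", strtoull("2D01A", NULL, 0));   /* prints 2     */
        printf("%llx\n", strtoull("0x2D01A", NULL, 0)); /* prints 2d01a */
        return 0;
}
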
index 93a5a59..58b5dfe 100644 (file)
 [
   {
-    "EventCode": "1002C",
+    "EventCode": "0x1002C",
     "EventName": "PM_LD_PREFETCH_CACHE_LINE_MISS",
     "BriefDescription": "The L1 cache was reloaded with a line that fulfills a prefetch request."
   },
   {
-    "EventCode": "10132",
+    "EventCode": "0x10132",
     "EventName": "PM_MRK_INST_ISSUED",
     "BriefDescription": "Marked instruction issued. Note that stores always get issued twice, the address gets issued to the LSU and the data gets issued to the VSU. Also, issues can sometimes get killed/cancelled and cause multiple sequential issues for the same instruction."
   },
   {
-    "EventCode": "101E0",
+    "EventCode": "0x101E0",
     "EventName": "PM_MRK_INST_DISP",
     "BriefDescription": "The thread has dispatched a randomly sampled marked instruction."
   },
   {
-    "EventCode": "101E2",
+    "EventCode": "0x101E2",
     "EventName": "PM_MRK_BR_TAKEN_CMPL",
     "BriefDescription": "Marked Branch Taken instruction completed."
   },
   {
-    "EventCode": "20112",
+    "EventCode": "0x20112",
     "EventName": "PM_MRK_NTF_FIN",
     "BriefDescription": "The marked instruction became the oldest in the pipeline before it finished. It excludes instructions that finish at dispatch."
   },
   {
-    "EventCode": "2C01C",
+    "EventCode": "0x2C01C",
     "EventName": "PM_EXEC_STALL_DMISS_OFF_CHIP",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a remote chip."
   },
   {
-    "EventCode": "20138",
+    "EventCode": "0x20138",
     "EventName": "PM_MRK_ST_NEST",
     "BriefDescription": "A store has been sampled/marked and is at the point of execution where it has completed in the core and can no longer be flushed. At this point the store is sent to the L2."
   },
   {
-    "EventCode": "2013A",
+    "EventCode": "0x2013A",
     "EventName": "PM_MRK_BRU_FIN",
     "BriefDescription": "Marked Branch instruction finished."
   },
   {
-    "EventCode": "2C144",
+    "EventCode": "0x2C144",
     "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC2",
     "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[15:27]."
   },
   {
-    "EventCode": "24156",
+    "EventCode": "0x24156",
     "EventName": "PM_MRK_STCX_FIN",
     "BriefDescription": "Marked conditional store instruction (STCX) finished. LARX and STCX are instructions used to acquire a lock."
   },
   {
-    "EventCode": "24158",
+    "EventCode": "0x24158",
     "EventName": "PM_MRK_INST",
     "BriefDescription": "An instruction was marked. Includes both Random Instruction Sampling (RIS) at decode time and Random Event Sampling (RES) at the time the configured event happens."
   },
   {
-    "EventCode": "2415C",
+    "EventCode": "0x2415C",
     "EventName": "PM_MRK_BR_CMPL",
     "BriefDescription": "A marked branch completed. All branches are included."
   },
   {
-    "EventCode": "200FD",
+    "EventCode": "0x200FD",
     "EventName": "PM_L1_ICACHE_MISS",
     "BriefDescription": "Demand iCache Miss."
   },
   {
-    "EventCode": "30130",
+    "EventCode": "0x30130",
     "EventName": "PM_MRK_INST_FIN",
     "BriefDescription": "marked instruction finished. Excludes instructions that finish at dispatch. Note that stores always finish twice since the address gets issued to the LSU and the data gets issued to the VSU."
   },
   {
-    "EventCode": "34146",
+    "EventCode": "0x34146",
     "EventName": "PM_MRK_LD_CMPL",
     "BriefDescription": "Marked loads completed."
   },
   {
-    "EventCode": "3E158",
+    "EventCode": "0x3E158",
     "EventName": "PM_MRK_STCX_FAIL",
     "BriefDescription": "Marked conditional store instruction (STCX) failed. LARX and STCX are instructions used to acquire a lock."
   },
   {
-    "EventCode": "3E15A",
+    "EventCode": "0x3E15A",
     "EventName": "PM_MRK_ST_FIN",
     "BriefDescription": "The marked instruction was a store of any kind."
   },
   {
-    "EventCode": "30068",
+    "EventCode": "0x30068",
     "EventName": "PM_L1_ICACHE_RELOADED_PREF",
     "BriefDescription": "Counts all Icache prefetch reloads ( includes demand turned into prefetch)."
   },
   {
-    "EventCode": "301E4",
+    "EventCode": "0x301E4",
     "EventName": "PM_MRK_BR_MPRED_CMPL",
     "BriefDescription": "Marked Branch Mispredicted. Includes direction and target."
   },
   {
-    "EventCode": "300F6",
+    "EventCode": "0x300F6",
     "EventName": "PM_LD_DEMAND_MISS_L1",
     "BriefDescription": "The L1 cache was reloaded with a line that fulfills a demand miss request. Counted at reload time, before finish."
   },
   {
-    "EventCode": "300FE",
+    "EventCode": "0x300FE",
     "EventName": "PM_DATA_FROM_L3MISS",
     "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
   },
   {
-    "EventCode": "40012",
+    "EventCode": "0x40012",
     "EventName": "PM_L1_ICACHE_RELOADED_ALL",
     "BriefDescription": "Counts all Icache reloads includes demand, prefetch, prefetch turned into demand and demand turned into prefetch."
   },
   {
-    "EventCode": "40134",
+    "EventCode": "0x40134",
     "EventName": "PM_MRK_INST_TIMEO",
     "BriefDescription": "Marked instruction finish timeout (instruction was lost)."
   },
   {
-    "EventCode": "4003C",
-    "EventName": "PM_DISP_STALL_HELD_SYNC_CYC",
-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch."
-  },
-  {
-    "EventCode": "4505A",
+    "EventCode": "0x4505A",
     "EventName": "PM_SP_FLOP_CMPL",
     "BriefDescription": "Single Precision floating point instructions completed."
   },
   {
-    "EventCode": "4D058",
+    "EventCode": "0x4D058",
     "EventName": "PM_VECTOR_FLOP_CMPL",
     "BriefDescription": "Vector floating point instructions completed."
   },
   {
-    "EventCode": "4D05A",
+    "EventCode": "0x4D05A",
     "EventName": "PM_NON_MATH_FLOP_CMPL",
     "BriefDescription": "Non Math instructions completed."
   },
   {
-    "EventCode": "401E0",
+    "EventCode": "0x401E0",
     "EventName": "PM_MRK_INST_CMPL",
     "BriefDescription": "marked instruction completed."
   },
   {
-    "EventCode": "400FE",
+    "EventCode": "0x400FE",
     "EventName": "PM_DATA_FROM_MEMORY",
     "BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss."
   }
index b01141e..843b51f 100644 (file)
 [
   {
-    "EventCode": "1000A",
+    "EventCode": "0x1000A",
     "EventName": "PM_PMC3_REWIND",
     "BriefDescription": "The speculative event selected for PMC3 rewinds and the counter for PMC3 is not charged."
   },
   {
-    "EventCode": "1C040",
+    "EventCode": "0x1C040",
     "EventName": "PM_XFER_FROM_SRC_PMC1",
     "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[0:12]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
   },
   {
-    "EventCode": "1C142",
+    "EventCode": "0x1C142",
     "EventName": "PM_MRK_XFER_FROM_SRC_PMC1",
     "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[0:12]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
   },
   {
-    "EventCode": "1C144",
+    "EventCode": "0x1C144",
     "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC1",
     "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[0:12]."
   },
   {
-    "EventCode": "1C056",
+    "EventCode": "0x1C056",
     "EventName": "PM_DERAT_MISS_4K",
     "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 4K. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "1C058",
+    "EventCode": "0x1C058",
     "EventName": "PM_DTLB_MISS_16G",
     "BriefDescription": "Data TLB reload (after a miss) page size 16G. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "1C05C",
+    "EventCode": "0x1C05C",
     "EventName": "PM_DTLB_MISS_2M",
     "BriefDescription": "Data TLB reload (after a miss) page size 2M. Implies radix translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "1E056",
+    "EventCode": "0x1E056",
     "EventName": "PM_EXEC_STALL_STORE_PIPE",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the store unit. This does not include cycles spent handling store misses, PTESYNC instructions or TLBIE instructions."
   },
   {
-    "EventCode": "1F150",
+    "EventCode": "0x1F150",
     "EventName": "PM_MRK_ST_L2_CYC",
     "BriefDescription": "Cycles from L2 RC dispatch to L2 RC completion."
   },
   {
-    "EventCode": "10062",
+    "EventCode": "0x10062",
     "EventName": "PM_LD_L3MISS_PEND_CYC",
     "BriefDescription": "Cycles L3 miss was pending for this thread."
   },
   {
-    "EventCode": "20010",
+    "EventCode": "0x20010",
     "EventName": "PM_PMC1_OVERFLOW",
     "BriefDescription": "The event selected for PMC1 caused the event counter to overflow."
   },
   {
-    "EventCode": "2001A",
+    "EventCode": "0x2001A",
     "EventName": "PM_ITLB_HIT",
     "BriefDescription": "The PTE required to translate the instruction address was resident in the TLB (instruction TLB access/IERAT reload). Applies to both HPT and RPT. When MMCR1[17]=0 this event counts only for demand misses. When MMCR1[17]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "2003E",
+    "EventCode": "0x2003E",
     "EventName": "PM_PTESYNC_FIN",
     "BriefDescription": "Ptesync instruction finished in the store unit. Only one ptesync can finish at a time."
   },
   {
-    "EventCode": "2C040",
+    "EventCode": "0x2C040",
     "EventName": "PM_XFER_FROM_SRC_PMC2",
     "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[15:27]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
   },
   {
-    "EventCode": "2C054",
+    "EventCode": "0x2C054",
     "EventName": "PM_DERAT_MISS_64K",
     "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "2C056",
+    "EventCode": "0x2C056",
     "EventName": "PM_DTLB_MISS_4K",
     "BriefDescription": "Data TLB reload (after a miss) page size 4K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "2D154",
+    "EventCode": "0x2D154",
     "EventName": "PM_MRK_DERAT_MISS_64K",
     "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "200F6",
+    "EventCode": "0x200F6",
     "EventName": "PM_DERAT_MISS",
     "BriefDescription": "DERAT Reloaded to satisfy a DERAT miss. All page sizes are counted by this event. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "3000A",
-    "EventName": "PM_DISP_STALL_ITLB_MISS",
-    "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction TLB miss."
-  },
-  {
-    "EventCode": "30016",
+    "EventCode": "0x30016",
     "EventName": "PM_EXEC_STALL_DERAT_DTLB_MISS",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss and waited for it resolve."
   },
   {
-    "EventCode": "3C040",
+    "EventCode": "0x3C040",
     "EventName": "PM_XFER_FROM_SRC_PMC3",
     "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
   },
   {
-    "EventCode": "3C142",
+    "EventCode": "0x3C142",
     "EventName": "PM_MRK_XFER_FROM_SRC_PMC3",
     "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
   },
   {
-    "EventCode": "3C144",
+    "EventCode": "0x3C144",
     "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC3",
     "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[30:42]."
   },
   {
-    "EventCode": "3C054",
+    "EventCode": "0x3C054",
     "EventName": "PM_DERAT_MISS_16M",
     "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16M. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "3C056",
+    "EventCode": "0x3C056",
     "EventName": "PM_DTLB_MISS_64K",
     "BriefDescription": "Data TLB reload (after a miss) page size 64K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "3C058",
+    "EventCode": "0x3C058",
     "EventName": "PM_LARX_FIN",
     "BriefDescription": "Load and reserve instruction (LARX) finished. LARX and STCX are instructions used to acquire a lock."
   },
   {
-    "EventCode": "301E2",
+    "EventCode": "0x301E2",
     "EventName": "PM_MRK_ST_CMPL",
     "BriefDescription": "Marked store completed and sent to nest. Note that this count excludes cache-inhibited stores."
   },
   {
-    "EventCode": "300FC",
+    "EventCode": "0x300FC",
     "EventName": "PM_DTLB_MISS",
     "BriefDescription": "The DPTEG required for the load/store instruction in execution was missing from the TLB. It includes pages of all sizes for demand and prefetch activity."
   },
   {
-    "EventCode": "4D02C",
+    "EventCode": "0x4D02C",
     "EventName": "PM_PMC1_REWIND",
     "BriefDescription": "The speculative event selected for PMC1 rewinds and the counter for PMC1 is not charged."
   },
   {
-    "EventCode": "4003E",
+    "EventCode": "0x4003E",
     "EventName": "PM_LD_CMPL",
     "BriefDescription": "Loads completed."
   },
   {
-    "EventCode": "4C040",
+    "EventCode": "0x4C040",
     "EventName": "PM_XFER_FROM_SRC_PMC4",
     "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[45:57]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
   },
   {
-    "EventCode": "4C142",
+    "EventCode": "0x4C142",
     "EventName": "PM_MRK_XFER_FROM_SRC_PMC4",
     "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[45:57]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
   },
   {
-    "EventCode": "4C144",
+    "EventCode": "0x4C144",
     "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC4",
     "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[45:57]."
   },
   {
-    "EventCode": "4C056",
+    "EventCode": "0x4C056",
     "EventName": "PM_DTLB_MISS_16M",
     "BriefDescription": "Data TLB reload (after a miss) page size 16M. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "4C05A",
+    "EventCode": "0x4C05A",
     "EventName": "PM_DTLB_MISS_1G",
     "BriefDescription": "Data TLB reload (after a miss) page size 1G. Implies radix translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "4C15E",
+    "EventCode": "0x4C15E",
     "EventName": "PM_MRK_DTLB_MISS_64K",
     "BriefDescription": "Marked Data TLB reload (after a miss) page size 64K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "4D056",
+    "EventCode": "0x4D056",
     "EventName": "PM_NON_FMA_FLOP_CMPL",
     "BriefDescription": "Non FMA instruction completed."
   },
   {
-    "EventCode": "40164",
+    "EventCode": "0x40164",
     "EventName": "PM_MRK_DERAT_MISS_2M",
     "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   }
index a119e56..7d0de1a 100644 (file)
 [
   {
-    "EventCode": "10016",
+    "EventCode": "0x10016",
     "EventName": "PM_VSU0_ISSUE",
     "BriefDescription": "VSU instructions issued to VSU pipe 0."
   },
   {
-    "EventCode": "1001C",
+    "EventCode": "0x1001C",
     "EventName": "PM_ULTRAVISOR_INST_CMPL",
     "BriefDescription": "PowerPC instructions that completed while the thread was in ultravisor state."
   },
   {
-    "EventCode": "100F0",
+    "EventCode": "0x100F0",
     "EventName": "PM_CYC",
     "BriefDescription": "Processor cycles."
   },
   {
-    "EventCode": "10134",
+    "EventCode": "0x10134",
     "EventName": "PM_MRK_ST_DONE_L2",
     "BriefDescription": "Marked stores completed in L2 (RC machine done)."
   },
   {
-    "EventCode": "1505E",
+    "EventCode": "0x1505E",
     "EventName": "PM_LD_HIT_L1",
     "BriefDescription": "Loads that finished without experiencing an L1 miss."
   },
   {
-    "EventCode": "1D05E",
-    "EventName": "PM_DISP_STALL_HELD_HALT_CYC",
-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of power management."
-  },
-  {
-    "EventCode": "1E054",
-    "EventName": "PM_EXEC_STALL_DMISS_L21_L31",
-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from another core's L2 or L3 on the same chip."
-  },
-  {
-    "EventCode": "1E05A",
-    "EventName": "PM_CMPL_STALL_LWSYNC",
-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a lwsync waiting to complete."
-  },
-  {
-    "EventCode": "1F056",
+    "EventCode": "0x1F056",
     "EventName": "PM_DISP_SS0_2_INSTR_CYC",
     "BriefDescription": "Cycles in which Superslice 0 dispatches either 1 or 2 instructions."
   },
   {
-    "EventCode": "1F15C",
+    "EventCode": "0x1F15C",
     "EventName": "PM_MRK_STCX_L2_CYC",
     "BriefDescription": "Cycles spent in the nest portion of a marked Stcx instruction. It starts counting when the operation starts to drain to the L2 and it stops counting when the instruction retires from the Instruction Completion Table (ICT) in the Instruction Sequencing Unit (ISU)."
   },
   {
-    "EventCode": "10066",
+    "EventCode": "0x10066",
     "EventName": "PM_ADJUNCT_CYC",
     "BriefDescription": "Cycles in which the thread is in Adjunct state. MSR[S HV PR] bits = 011."
   },
   {
-    "EventCode": "101E4",
+    "EventCode": "0x101E4",
     "EventName": "PM_MRK_L1_ICACHE_MISS",
     "BriefDescription": "Marked Instruction suffered an icache Miss."
   },
   {
-    "EventCode": "101EA",
+    "EventCode": "0x101EA",
     "EventName": "PM_MRK_L1_RELOAD_VALID",
     "BriefDescription": "Marked demand reload."
   },
   {
-    "EventCode": "100F4",
+    "EventCode": "0x100F4",
     "EventName": "PM_FLOP_CMPL",
     "BriefDescription": "Floating Point Operations Completed. Includes any type. It counts once for each 1, 2, 4 or 8 flop instruction. Use PM_1|2|4|8_FLOP_CMPL events to count flops."
   },
   {
-    "EventCode": "100FA",
+    "EventCode": "0x100FA",
     "EventName": "PM_RUN_LATCH_ANY_THREAD_CYC",
     "BriefDescription": "Cycles when at least one thread has the run latch set."
   },
   {
-    "EventCode": "100FC",
+    "EventCode": "0x100FC",
     "EventName": "PM_LD_REF_L1",
     "BriefDescription": "All L1 D cache load references counted at finish, gated by reject. In P9 and earlier this event counted only cacheable loads but in P10 both cacheable and non-cacheable loads are included."
   },
   {
-    "EventCode": "20006",
-    "EventName": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC",
-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch due to Issue queue full. Includes issue queue and branch queue."
-  },
-  {
-    "EventCode": "2000C",
+    "EventCode": "0x2000C",
     "EventName": "PM_RUN_LATCH_ALL_THREADS_CYC",
     "BriefDescription": "Cycles when the run latch is set for all threads."
   },
   {
-    "EventCode": "2E010",
+    "EventCode": "0x2E010",
     "EventName": "PM_ADJUNCT_INST_CMPL",
     "BriefDescription": "PowerPC instructions that completed while the thread is in Adjunct state."
   },
   {
-    "EventCode": "2E014",
+    "EventCode": "0x2E014",
     "EventName": "PM_STCX_FIN",
     "BriefDescription": "Conditional store instruction (STCX) finished. LARX and STCX are instructions used to acquire a lock."
   },
   {
-    "EventCode": "20130",
+    "EventCode": "0x20130",
     "EventName": "PM_MRK_INST_DECODED",
     "BriefDescription": "An instruction was marked at decode time. Random Instruction Sampling (RIS) only."
   },
   {
-    "EventCode": "20132",
+    "EventCode": "0x20132",
     "EventName": "PM_MRK_DFU_ISSUE",
     "BriefDescription": "The marked instruction was a decimal floating point operation issued to the VSU. Measured at issue time."
   },
   {
-    "EventCode": "20134",
+    "EventCode": "0x20134",
     "EventName": "PM_MRK_FXU_ISSUE",
     "BriefDescription": "The marked instruction was a fixed point operation issued to the VSU. Measured at issue time."
   },
   {
-    "EventCode": "2505C",
+    "EventCode": "0x2505C",
     "EventName": "PM_VSU_ISSUE",
     "BriefDescription": "At least one VSU instruction was issued to one of the VSU pipes. Up to 4 per cycle. Includes fixed point operations."
   },
   {
-    "EventCode": "2F054",
+    "EventCode": "0x2F054",
     "EventName": "PM_DISP_SS1_2_INSTR_CYC",
     "BriefDescription": "Cycles in which Superslice 1 dispatches either 1 or 2 instructions."
   },
   {
-    "EventCode": "2F056",
+    "EventCode": "0x2F056",
     "EventName": "PM_DISP_SS1_4_INSTR_CYC",
     "BriefDescription": "Cycles in which Superslice 1 dispatches either 3 or 4 instructions."
   },
   {
-    "EventCode": "2006C",
+    "EventCode": "0x2006C",
     "EventName": "PM_RUN_CYC_SMT4_MODE",
     "BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT4 mode."
   },
   {
-    "EventCode": "201E0",
+    "EventCode": "0x201E0",
     "EventName": "PM_MRK_DATA_FROM_MEMORY",
     "BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss for a marked load."
   },
   {
-    "EventCode": "201E4",
+    "EventCode": "0x201E4",
     "EventName": "PM_MRK_DATA_FROM_L3MISS",
     "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked load."
   },
   {
-    "EventCode": "201E8",
+    "EventCode": "0x201E8",
     "EventName": "PM_THRESH_EXC_512",
     "BriefDescription": "Threshold counter exceeded a value of 512."
   },
   {
-    "EventCode": "200F2",
+    "EventCode": "0x200F2",
     "EventName": "PM_INST_DISP",
     "BriefDescription": "PowerPC instructions dispatched."
   },
   {
-    "EventCode": "30132",
+    "EventCode": "0x30132",
     "EventName": "PM_MRK_VSU_FIN",
     "BriefDescription": "VSU marked instructions finished. Excludes simple FX instructions issued to the Store Unit."
   },
   {
-    "EventCode": "30038",
+    "EventCode": "0x30038",
     "EventName": "PM_EXEC_STALL_DMISS_LMEM",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local memory, local OpenCapp cache, or local OpenCapp memory."
   },
   {
-    "EventCode": "3F04A",
+    "EventCode": "0x3F04A",
     "EventName": "PM_LSU_ST5_FIN",
     "BriefDescription": "LSU Finished an internal operation in ST2 port."
   },
   {
-    "EventCode": "34054",
-    "EventName": "PM_EXEC_STALL_DMISS_L2L3_NOCONFLICT",
-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, without a dispatch conflict."
-  },
-  {
-    "EventCode": "3405A",
+    "EventCode": "0x3405A",
     "EventName": "PM_PRIVILEGED_INST_CMPL",
     "BriefDescription": "PowerPC Instructions that completed while the thread is in Privileged state."
   },
   {
-    "EventCode": "3F150",
+    "EventCode": "0x3F150",
     "EventName": "PM_MRK_ST_DRAIN_CYC",
     "BriefDescription": "cycles to drain st from core to L2."
   },
   {
-    "EventCode": "3F054",
+    "EventCode": "0x3F054",
     "EventName": "PM_DISP_SS0_4_INSTR_CYC",
     "BriefDescription": "Cycles in which Superslice 0 dispatches either 3 or 4 instructions."
   },
   {
-    "EventCode": "3F056",
+    "EventCode": "0x3F056",
     "EventName": "PM_DISP_SS0_8_INSTR_CYC",
     "BriefDescription": "Cycles in which Superslice 0 dispatches either 5, 6, 7 or 8 instructions."
   },
   {
-    "EventCode": "30162",
+    "EventCode": "0x30162",
     "EventName": "PM_MRK_ISSUE_DEPENDENT_LOAD",
     "BriefDescription": "The marked instruction was dependent on a load. It is eligible for issue kill."
   },
   {
-    "EventCode": "40114",
+    "EventCode": "0x40114",
     "EventName": "PM_MRK_START_PROBE_NOP_DISP",
     "BriefDescription": "Marked Start probe nop dispatched. Instruction AND R0,R0,R0."
   },
   {
-    "EventCode": "4001C",
+    "EventCode": "0x4001C",
     "EventName": "PM_VSU_FIN",
     "BriefDescription": "VSU instructions finished."
   },
   {
-    "EventCode": "4C01A",
+    "EventCode": "0x4C01A",
     "EventName": "PM_EXEC_STALL_DMISS_OFF_NODE",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a distant chip."
   },
   {
-    "EventCode": "4D012",
+    "EventCode": "0x4D012",
     "EventName": "PM_PMC3_SAVED",
     "BriefDescription": "The conditions for the speculative event selected for PMC3 are met and PMC3 is charged."
   },
   {
-    "EventCode": "4D022",
+    "EventCode": "0x4D022",
     "EventName": "PM_HYPERVISOR_INST_CMPL",
     "BriefDescription": "PowerPC instructions that completed while the thread is in hypervisor state."
   },
   {
-    "EventCode": "4D026",
+    "EventCode": "0x4D026",
     "EventName": "PM_ULTRAVISOR_CYC",
     "BriefDescription": "Cycles when the thread is in Ultravisor state. MSR[S HV PR]=110."
   },
   {
-    "EventCode": "4D028",
+    "EventCode": "0x4D028",
     "EventName": "PM_PRIVILEGED_CYC",
     "BriefDescription": "Cycles when the thread is in Privileged state. MSR[S HV PR]=x00."
   },
   {
-    "EventCode": "40030",
+    "EventCode": "0x40030",
     "EventName": "PM_INST_FIN",
     "BriefDescription": "Instructions finished."
   },
   {
-    "EventCode": "44146",
+    "EventCode": "0x44146",
     "EventName": "PM_MRK_STCX_CORE_CYC",
     "BriefDescription": "Cycles spent in the core portion of a marked Stcx instruction. It starts counting when the instruction is decoded and stops counting when it drains into the L2."
   },
   {
-    "EventCode": "44054",
+    "EventCode": "0x44054",
     "EventName": "PM_VECTOR_LD_CMPL",
     "BriefDescription": "Vector load instructions completed."
   },
   {
-    "EventCode": "45054",
+    "EventCode": "0x45054",
     "EventName": "PM_FMA_CMPL",
     "BriefDescription": "Two floating point instructions completed (FMA class of instructions: fmadd, fnmadd, fmsub, fnmsub). Scalar instructions only."
   },
   {
-    "EventCode": "45056",
+    "EventCode": "0x45056",
     "EventName": "PM_SCALAR_FLOP_CMPL",
     "BriefDescription": "Scalar floating point instructions completed."
   },
   {
-    "EventCode": "4505C",
+    "EventCode": "0x4505C",
     "EventName": "PM_MATH_FLOP_CMPL",
     "BriefDescription": "Math floating point instructions completed."
   },
   {
-    "EventCode": "4D05E",
+    "EventCode": "0x4D05E",
     "EventName": "PM_BR_CMPL",
     "BriefDescription": "A branch completed. All branches are included."
   },
   {
-    "EventCode": "4E15E",
+    "EventCode": "0x4E15E",
     "EventName": "PM_MRK_INST_FLUSHED",
     "BriefDescription": "The marked instruction was flushed."
   },
   {
-    "EventCode": "401E6",
+    "EventCode": "0x401E6",
     "EventName": "PM_MRK_INST_FROM_L3MISS",
     "BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked instruction."
   },
   {
-    "EventCode": "401E8",
+    "EventCode": "0x401E8",
     "EventName": "PM_MRK_DATA_FROM_L2MISS",
     "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss for a marked load."
   },
   {
-    "EventCode": "400F0",
+    "EventCode": "0x400F0",
     "EventName": "PM_LD_DEMAND_MISS_L1_FIN",
     "BriefDescription": "Load Missed L1, counted at finish time."
   },
   {
-    "EventCode": "400FA",
+    "EventCode": "0x400FA",
     "EventName": "PM_RUN_INST_CMPL",
     "BriefDescription": "Completed PowerPC instructions gated by the run latch."
   }
index b61b5cc..b8aded6 100644 (file)
 [
   {
-    "EventCode": "100FE",
+    "EventCode": "0x100FE",
     "EventName": "PM_INST_CMPL",
     "BriefDescription": "PowerPC instructions completed."
   },
   {
-    "EventCode": "10006",
-    "EventName": "PM_DISP_STALL_HELD_OTHER_CYC",
-    "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any other reason."
-  },
-  {
-    "EventCode": "1000C",
+    "EventCode": "0x1000C",
     "EventName": "PM_LSU_LD0_FIN",
     "BriefDescription": "LSU Finished an internal operation in LD0 port."
   },
   {
-    "EventCode": "1000E",
+    "EventCode": "0x1000E",
     "EventName": "PM_MMA_ISSUED",
     "BriefDescription": "MMA instructions issued."
   },
   {
-    "EventCode": "10012",
+    "EventCode": "0x10012",
     "EventName": "PM_LSU_ST0_FIN",
     "BriefDescription": "LSU Finished an internal operation in ST0 port."
   },
   {
-    "EventCode": "10014",
+    "EventCode": "0x10014",
     "EventName": "PM_LSU_ST4_FIN",
     "BriefDescription": "LSU Finished an internal operation in ST4 port."
   },
   {
-    "EventCode": "10018",
+    "EventCode": "0x10018",
     "EventName": "PM_IC_DEMAND_CYC",
     "BriefDescription": "Cycles in which an instruction reload is pending to satisfy a demand miss."
   },
   {
-    "EventCode": "10022",
+    "EventCode": "0x10022",
     "EventName": "PM_PMC2_SAVED",
     "BriefDescription": "The conditions for the speculative event selected for PMC2 are met and PMC2 is charged."
   },
   {
-    "EventCode": "10024",
+    "EventCode": "0x10024",
     "EventName": "PM_PMC5_OVERFLOW",
     "BriefDescription": "The event selected for PMC5 caused the event counter to overflow."
   },
   {
-    "EventCode": "10058",
+    "EventCode": "0x10058",
     "EventName": "PM_EXEC_STALL_FIN_AT_DISP",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline finished at dispatch and did not require execution in the LSU, BRU or VSU."
   },
   {
-    "EventCode": "1005A",
+    "EventCode": "0x1005A",
     "EventName": "PM_FLUSH_MPRED",
     "BriefDescription": "A flush occurred due to a mispredicted branch. Includes target and direction."
   },
   {
-    "EventCode": "1C05A",
+    "EventCode": "0x1C05A",
     "EventName": "PM_DERAT_MISS_2M",
     "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M. Implies radix translation. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
   },
   {
-    "EventCode": "10064",
-    "EventName": "PM_DISP_STALL_IC_L2",
-    "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2."
+    "EventCode": "0x1E05A",
+    "EventName": "PM_CMPL_STALL_LWSYNC",
+    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a lwsync waiting to complete."
   },
   {
-    "EventCode": "10068",
+    "EventCode": "0x10068",
     "EventName": "PM_BR_FIN",
     "BriefDescription": "A branch instruction finished. Includes predicted/mispredicted/unconditional."
   },
   {
-    "EventCode": "1006A",
+    "EventCode": "0x1006A",
     "EventName": "PM_FX_LSU_FIN",
     "BriefDescription": "Simple fixed point instruction issued to the store unit. Measured at finish time."
   },
   {
-    "EventCode": "1006C",
+    "EventCode": "0x1006C",
     "EventName": "PM_RUN_CYC_ST_MODE",
     "BriefDescription": "Cycles when the run latch is set and the core is in ST mode."
   },
   {
-    "EventCode": "20004",
+    "EventCode": "0x20004",
     "EventName": "PM_ISSUE_STALL",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was dispatched but not issued yet."
   },
   {
-    "EventCode": "2000A",
+    "EventCode": "0x2000A",
     "EventName": "PM_HYPERVISOR_CYC",
     "BriefDescription": "Cycles when the thread is in Hypervisor state. MSR[S HV PR]=010."
   },
   {
-    "EventCode": "2000E",
+    "EventCode": "0x2000E",
     "EventName": "PM_LSU_LD1_FIN",
     "BriefDescription": "LSU Finished an internal operation in LD1 port."
   },
   {
-    "EventCode": "2C014",
+    "EventCode": "0x2C014",
     "EventName": "PM_CMPL_STALL_SPECIAL",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline required special handling before completing."
   },
   {
-    "EventCode": "2C018",
+    "EventCode": "0x2C018",
     "EventName": "PM_EXEC_STALL_DMISS_L3MISS",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a source beyond the local L2 or local L3."
   },
   {
-    "EventCode": "2D010",
+    "EventCode": "0x2D010",
     "EventName": "PM_LSU_ST1_FIN",
     "BriefDescription": "LSU Finished an internal operation in ST1 port."
   },
   {
-    "EventCode": "2D012",
+    "EventCode": "0x2D012",
     "EventName": "PM_VSU1_ISSUE",
     "BriefDescription": "VSU instructions issued to VSU pipe 1."
   },
   {
-    "EventCode": "2D018",
+    "EventCode": "0x2D018",
     "EventName": "PM_EXEC_STALL_VSU",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the VSU (includes FXU, VSU, CRU)."
   },
   {
-    "EventCode": "2E01E",
+    "EventCode": "0x2D01C",
+    "EventName": "PM_CMPL_STALL_STCX",
+    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a stcx waiting for resolution from the nest before completing."
+  },
+  {
+    "EventCode": "0x2E01E",
     "EventName": "PM_EXEC_STALL_NTC_FLUSH",
-    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in any unit before it was flushed. Note that if the flush of the oldest instruction happens after finish, the cycles from dispatch to issue will be included in PM_DISP_STALL and the cycles from issue to finish will be included in PM_EXEC_STALL and its corresponding children."
+    "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in any unit before it was flushed. Note that if the flush of the oldest instruction happens after finish, the cycles from dispatch to issue will be included in PM_DISP_STALL and the cycles from issue to finish will be included in PM_EXEC_STALL and its corresponding children. This event will also count cycles when the previous NTF instruction is still completing and the new NTF instruction is stalled at dispatch."
   },
   {
-    "EventCode": "2013C",
+    "EventCode": "0x2013C",
     "EventName": "PM_MRK_FX_LSU_FIN",
     "BriefDescription": "The marked instruction was simple fixed point that was issued to the store unit. Measured at finish time."
   },
   {
-    "EventCode": "2405A",
+    "EventCode": "0x2405A",
     "EventName": "PM_NTC_FIN",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline (NTC) finishes. Note that instructions can finish out of order, therefore not all the instructions that finish have a Next-to-complete status."
   },
   {
-    "EventCode": "201E2",
+    "EventCode": "0x201E2",
     "EventName": "PM_MRK_LD_MISS_L1",
     "BriefDescription": "Marked DL1 Demand Miss counted at finish time."
   },
   {
-    "EventCode": "200F4",
+    "EventCode": "0x200F4",
     "EventName": "PM_RUN_CYC",
     "BriefDescription": "Processor cycles gated by the run latch."
   },
   {
-    "EventCode": "30004",
-    "EventName": "PM_DISP_STALL_FLUSH",
-    "BriefDescription": "Cycles when dispatch was stalled because of a flush that happened to an instruction(s) that was not yet NTC. PM_EXEC_STALL_NTC_FLUSH only includes instructions that were flushed after becoming NTC."
-  },
-  {
-    "EventCode": "30008",
+    "EventCode": "0x30008",
     "EventName": "PM_EXEC_STALL",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting to finish in one of the execution units (BRU, LSU, VSU). Only cycles between issue and finish are counted in this category."
   },
   {
-    "EventCode": "3001A",
+    "EventCode": "0x3001A",
     "EventName": "PM_LSU_ST2_FIN",
     "BriefDescription": "LSU Finished an internal operation in ST2 port."
   },
   {
-    "EventCode": "30020",
+    "EventCode": "0x30020",
     "EventName": "PM_PMC2_REWIND",
     "BriefDescription": "The speculative event selected for PMC2 rewinds and the counter for PMC2 is not charged."
   },
   {
-    "EventCode": "30022",
+    "EventCode": "0x30022",
     "EventName": "PM_PMC4_SAVED",
     "BriefDescription": "The conditions for the speculative event selected for PMC4 are met and PMC4 is charged."
   },
   {
-    "EventCode": "30024",
+    "EventCode": "0x30024",
     "EventName": "PM_PMC6_OVERFLOW",
     "BriefDescription": "The event selected for PMC6 caused the event counter to overflow."
   },
   {
-    "EventCode": "30028",
+    "EventCode": "0x30028",
     "EventName": "PM_CMPL_STALL_MEM_ECC",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for the non-speculative finish of either a stcx waiting for its result or a load waiting for non-critical sectors of data and ECC."
   },
   {
-    "EventCode": "30036",
+    "EventCode": "0x30036",
     "EventName": "PM_EXEC_STALL_SIMPLE_FX",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a simple fixed point instruction executing in the Load Store Unit."
   },
   {
-    "EventCode": "3003A",
+    "EventCode": "0x3003A",
     "EventName": "PM_CMPL_STALL_EXCEPTION",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was not allowed to complete because it was interrupted by ANY exception, which has to be serviced before the instruction can complete."
   },
   {
-    "EventCode": "3F044",
+    "EventCode": "0x3F044",
     "EventName": "PM_VSU2_ISSUE",
     "BriefDescription": "VSU instructions issued to VSU pipe 2."
   },
   {
-    "EventCode": "30058",
+    "EventCode": "0x30058",
     "EventName": "PM_TLBIE_FIN",
     "BriefDescription": "TLBIE instructions finished in the LSU. Two TLBIEs can finish each cycle. All will be counted."
   },
   {
-    "EventCode": "3D058",
+    "EventCode": "0x3D058",
     "EventName": "PM_SCALAR_FSQRT_FDIV_ISSUE",
     "BriefDescription": "Scalar versions of four floating point operations: fdiv,fsqrt (xvdivdp, xvdivsp, xvsqrtdp, xvsqrtsp)."
   },
   {
-    "EventCode": "30066",
+    "EventCode": "0x30066",
     "EventName": "PM_LSU_FIN",
     "BriefDescription": "LSU Finished an internal operation (up to 4 per cycle)."
   },
   {
-    "EventCode": "40004",
+    "EventCode": "0x40004",
     "EventName": "PM_FXU_ISSUE",
     "BriefDescription": "A fixed point instruction was issued to the VSU."
   },
   {
-    "EventCode": "40008",
+    "EventCode": "0x40008",
     "EventName": "PM_NTC_ALL_FIN",
     "BriefDescription": "Cycles in which both instructions in the ICT entry pair show as finished. These are the cycles between finish and completion for the oldest pair of instructions in the pipeline."
   },
   {
-    "EventCode": "40010",
+    "EventCode": "0x40010",
     "EventName": "PM_PMC3_OVERFLOW",
     "BriefDescription": "The event selected for PMC3 caused the event counter to overflow."
   },
   {
-    "EventCode": "4C012",
+    "EventCode": "0x4C012",
     "EventName": "PM_EXEC_STALL_DERAT_ONLY_MISS",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered an ERAT miss and waited for it resolve."
   },
   {
-    "EventCode": "4C018",
+    "EventCode": "0x4C018",
     "EventName": "PM_CMPL_STALL",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline cannot complete because the thread was blocked for any reason."
   },
   {
-    "EventCode": "4C01E",
+    "EventCode": "0x4C01E",
     "EventName": "PM_LSU_ST3_FIN",
     "BriefDescription": "LSU Finished an internal operation in ST3 port."
   },
   {
-    "EventCode": "4D018",
+    "EventCode": "0x4D018",
     "EventName": "PM_EXEC_STALL_BRU",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the Branch unit."
   },
   {
-    "EventCode": "4D01A",
+    "EventCode": "0x4D01A",
     "EventName": "PM_CMPL_STALL_HWSYNC",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a hwsync waiting for response from L2 before completing."
   },
   {
-    "EventCode": "4D01C",
+    "EventCode": "0x4D01C",
     "EventName": "PM_EXEC_STALL_TLBIEL",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a TLBIEL instruction executing in the Load Store Unit. TLBIEL instructions have lower overhead than TLBIE instructions because they don't get set to the nest."
   },
   {
-    "EventCode": "4E012",
+    "EventCode": "0x4E012",
     "EventName": "PM_EXEC_STALL_UNKNOWN",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline completed without an ntf_type pulse. The ntf_pulse was missed by the ISU because the NTF finishes and completions came too close together."
   },
   {
-    "EventCode": "4D020",
+    "EventCode": "0x4D020",
     "EventName": "PM_VSU3_ISSUE",
     "BriefDescription": "VSU instruction was issued to VSU pipe 3."
   },
   {
-    "EventCode": "40132",
+    "EventCode": "0x40132",
     "EventName": "PM_MRK_LSU_FIN",
     "BriefDescription": "LSU marked instruction finish."
   },
   {
-    "EventCode": "45058",
+    "EventCode": "0x45058",
     "EventName": "PM_IC_MISS_CMPL",
     "BriefDescription": "Non-speculative icache miss, counted at completion."
   },
   {
-    "EventCode": "4D050",
+    "EventCode": "0x4D050",
     "EventName": "PM_VSU_NON_FLOP_CMPL",
     "BriefDescription": "Non-floating point VSU instructions completed."
   },
   {
-    "EventCode": "4D052",
+    "EventCode": "0x4D052",
     "EventName": "PM_2FLOP_CMPL",
     "BriefDescription": "Double Precision vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg completed."
   },
   {
-    "EventCode": "400F2",
+    "EventCode": "0x400F2",
     "EventName": "PM_1PLUS_PPC_DISP",
     "BriefDescription": "Cycles at least one Instr Dispatched."
   },
   {
-    "EventCode": "400F8",
+    "EventCode": "0x400F8",
     "EventName": "PM_FLUSH",
     "BriefDescription": "Flush (any type)."
   }
index ea122a9..b5d1bd3 100644 (file)
@@ -1,21 +1,21 @@
 [
   {
-    "EventCode": "301E8",
+    "EventCode": "0x301E8",
     "EventName": "PM_THRESH_EXC_64",
     "BriefDescription": "Threshold counter exceeded a value of 64."
   },
   {
-    "EventCode": "45050",
+    "EventCode": "0x45050",
     "EventName": "PM_1FLOP_CMPL",
     "BriefDescription": "One floating point instruction completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
   },
   {
-    "EventCode": "45052",
+    "EventCode": "0x45052",
     "EventName": "PM_4FLOP_CMPL",
     "BriefDescription": "Four floating point instructions completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
   },
   {
-    "EventCode": "4D054",
+    "EventCode": "0x4D054",
     "EventName": "PM_8FLOP_CMPL",
     "BriefDescription": "Four Double Precision vector instructions completed."
   }
index 5a714e3..db3766d 100644 (file)
@@ -1,56 +1,56 @@
 [
   {
-    "EventCode": "1F15E",
+    "EventCode": "0x1F15E",
     "EventName": "PM_MRK_START_PROBE_NOP_CMPL",
     "BriefDescription": "Marked Start probe nop (AND R0,R0,R0) completed."
   },
   {
-    "EventCode": "20016",
+    "EventCode": "0x20016",
     "EventName": "PM_ST_FIN",
     "BriefDescription": "Store finish count. Includes speculative activity."
   },
   {
-    "EventCode": "20018",
+    "EventCode": "0x20018",
     "EventName": "PM_ST_FWD",
     "BriefDescription": "Store forwards that finished."
   },
   {
-    "EventCode": "2011C",
+    "EventCode": "0x2011C",
     "EventName": "PM_MRK_NTF_CYC",
     "BriefDescription": "Cycles during which the marked instruction is the oldest in the pipeline (NTF or NTC)."
   },
   {
-    "EventCode": "2E01C",
+    "EventCode": "0x2E01C",
     "EventName": "PM_EXEC_STALL_TLBIE",
     "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a TLBIE instruction executing in the Load Store Unit."
   },
   {
-    "EventCode": "201E6",
+    "EventCode": "0x201E6",
     "EventName": "PM_THRESH_EXC_32",
     "BriefDescription": "Threshold counter exceeded a value of 32."
   },
   {
-    "EventCode": "200F0",
+    "EventCode": "0x200F0",
     "EventName": "PM_ST_CMPL",
     "BriefDescription": "Stores completed from S2Q (2nd-level store queue). This event includes regular stores, stcx and cache inhibited stores. The following operations are excluded (pteupdate, snoop tlbie complete, store atomics, miso, load atomic payloads, tlbie, tlbsync, slbieg, isync, msgsnd, slbiag, cpabort, copy, tcheck, tend, stsync, dcbst, icbi, dcbf, hwsync, lwsync, ptesync, eieio, msgsync)."
   },
   {
-    "EventCode": "200FE",
+    "EventCode": "0x200FE",
     "EventName": "PM_DATA_FROM_L2MISS",
     "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss."
   },
   {
-    "EventCode": "30010",
+    "EventCode": "0x30010",
     "EventName": "PM_PMC2_OVERFLOW",
     "BriefDescription": "The event selected for PMC2 caused the event counter to overflow."
   },
   {
-    "EventCode": "4D010",
+    "EventCode": "0x4D010",
     "EventName": "PM_PMC1_SAVED",
     "BriefDescription": "The conditions for the speculative event selected for PMC1 are met and PMC1 is charged."
   },
   {
-    "EventCode": "4D05C",
+    "EventCode": "0x4D05C",
     "EventName": "PM_DPP_FLOP_CMPL",
     "BriefDescription": "Double-Precision or Quad-Precision instructions completed."
   }
index 7422b0e..9604446 100644 (file)
@@ -960,7 +960,7 @@ static int get_maxfds(void)
        struct rlimit rlim;
 
        if (getrlimit(RLIMIT_NOFILE, &rlim) == 0)
-               return min((int)rlim.rlim_max / 2, 512);
+               return min(rlim.rlim_max / 2, (rlim_t)512);
 
        return 512;
 }
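
The one-line change above keeps the min() comparison in rlim_t instead of truncating to int. A minimal sketch of the hazard being fixed (assumes a 64-bit Linux target, where RLIM_INFINITY is all-ones): the cast commonly wraps to -1, -1 / 2 is 0, and the function would return a file-descriptor budget of 0.

#include <stdio.h>
#include <sys/resource.h>

#define min(a, b) ((a) < (b) ? (a) : (b))  /* stand-in for the tools macro */

int main(void)
{
        rlim_t max = RLIM_INFINITY;  /* all bits set on 64-bit Linux */

        printf("old: %d\n", min((int)max / 2, 512));              /* 0   */
        printf("new: %llu\n",
               (unsigned long long)min(max / 2, (rlim_t)512));    /* 512 */
        return 0;
}
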
index 4a7b8de..8c10955 100644 (file)
@@ -16,7 +16,7 @@ pinned=0
 exclusive=0
 exclude_user=0
 exclude_kernel=0|1
-exclude_hv=0
+exclude_hv=0|1
 exclude_idle=0
 mmap=1
 comm=1
index ddb52f7..5ed674a 100644 (file)
@@ -451,10 +451,10 @@ static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
                goto out;
        }
 
-       err = -1;
        link = bpf_program__attach(skel->progs.on_switch);
-       if (!link) {
+       if (IS_ERR(link)) {
                pr_err("Failed to attach leader program\n");
+               err = PTR_ERR(link);
                goto out;
        }
 
@@ -521,9 +521,10 @@ static int bperf__load(struct evsel *evsel, struct target *target)
 
        evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry.link_id);
        if (evsel->bperf_leader_link_fd < 0 &&
-           bperf_reload_leader_program(evsel, attr_map_fd, &entry))
+           bperf_reload_leader_program(evsel, attr_map_fd, &entry)) {
+               err = -1;
                goto out;
-
+       }
        /*
         * The bpf_link holds reference to the leader program, and the
         * leader program holds reference to the maps. Therefore, if
@@ -550,6 +551,7 @@ static int bperf__load(struct evsel *evsel, struct target *target)
        /* Step 2: load the follower skeleton */
        evsel->follower_skel = bperf_follower_bpf__open();
        if (!evsel->follower_skel) {
+               err = -1;
                pr_err("Failed to open follower skeleton\n");
                goto out;
        }
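
With the libbpf version used here, bpf_program__attach() reports failure through an error-encoded pointer rather than NULL, so the old !link test could never fire; the fix also propagates the real error code instead of a blanket -1. A hedged sketch of the pattern, assuming it is built inside the perf tree where tools/include provides linux/err.h:

#include <bpf/libbpf.h>
#include <linux/err.h>  /* tools-tree IS_ERR()/PTR_ERR() helpers */

static int attach_prog(struct bpf_program *prog)
{
        struct bpf_link *link = bpf_program__attach(prog);

        if (IS_ERR(link))               /* a NULL check would miss this */
                return PTR_ERR(link);   /* e.g. -EPERM or -ENOENT */
        return 0;
}
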
index b2f4920..7d2ba84 100644 (file)
@@ -975,9 +975,13 @@ static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
        if ((tag == DW_TAG_formal_parameter ||
             tag == DW_TAG_variable) &&
            die_compare_name(die_mem, fvp->name) &&
-       /* Does the DIE have location information or external instance? */
+       /*
+        * Does the DIE have location information or const value
+        * or external instance?
+        */
            (dwarf_attr(die_mem, DW_AT_external, &attr) ||
-            dwarf_attr(die_mem, DW_AT_location, &attr)))
+            dwarf_attr(die_mem, DW_AT_location, &attr) ||
+            dwarf_attr(die_mem, DW_AT_const_value, &attr)))
                return DIE_FIND_CB_END;
        if (dwarf_haspc(die_mem, fvp->addr))
                return DIE_FIND_CB_CONTINUE;
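
The hunk above widens the predicate for a usable variable DIE to include DW_AT_const_value, so variables optimized into immediate values can still be probed. A hedged sketch of that predicate on its own, assuming elfutils libdw (link with -ldw):

#include <elfutils/libdw.h>
#include <dwarf.h>
#include <stdbool.h>

/* A variable DIE is usable if it carries a location, a constant
 * value, or refers to an external instance. */
static bool die_variable_is_usable(Dwarf_Die *die)
{
        Dwarf_Attribute attr;

        return dwarf_attr(die, DW_AT_external, &attr) != NULL ||
               dwarf_attr(die, DW_AT_location, &attr) != NULL ||
               dwarf_attr(die, DW_AT_const_value, &attr) != NULL;
}
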
index 9130f6f..bc5e4f2 100644 (file)
@@ -144,6 +144,7 @@ static void perf_env__purge_bpf(struct perf_env *env)
                node = rb_entry(next, struct bpf_prog_info_node, rb_node);
                next = rb_next(&node->rb_node);
                rb_erase(&node->rb_node, root);
+               free(node->info_linear);
                free(node);
        }
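
The added free() plugs a leak: each rb-tree node owns a separately allocated info_linear buffer, so tearing down the tree must release the buffer before the node. A minimal sketch of that ownership rule (struct shape simplified from the code above):

#include <stdlib.h>

struct prog_node {
        void *info_linear;  /* heap buffer owned by the node */
};

static void prog_node_destroy(struct prog_node *node)
{
        free(node->info_linear);  /* the previously missing step */
        free(node);
}
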
 
index 4a3cd1b..a8d8463 100644 (file)
@@ -428,6 +428,7 @@ struct evsel *evsel__clone(struct evsel *orig)
        evsel->auto_merge_stats = orig->auto_merge_stats;
        evsel->collect_stat = orig->collect_stat;
        evsel->weak_group = orig->weak_group;
+       evsel->use_config_name = orig->use_config_name;
 
        if (evsel__copy_config_terms(evsel, orig) < 0)
                goto out_err;
index 75cf5db..bdad52a 100644 (file)
@@ -83,8 +83,10 @@ struct evsel {
                bool                    collect_stat;
                bool                    weak_group;
                bool                    bpf_counter;
+               bool                    use_config_name;
                int                     bpf_fd;
                struct bpf_object       *bpf_obj;
+               struct list_head        config_terms;
        };
 
        /*
@@ -116,10 +118,8 @@ struct evsel {
        bool                    merged_stat;
        bool                    reset_group;
        bool                    errored;
-       bool                    use_config_name;
        struct hashmap          *per_pkg_mask;
        struct evsel            *leader;
-       struct list_head        config_terms;
        int                     err;
        int                     cpu_iter;
        struct {
index 829af17..0204116 100644 (file)
@@ -103,6 +103,11 @@ static void perf_probe_build_id(struct evsel *evsel)
        evsel->core.attr.build_id = 1;
 }
 
+static void perf_probe_cgroup(struct evsel *evsel)
+{
+       evsel->core.attr.cgroup = 1;
+}
+
 bool perf_can_sample_identifier(void)
 {
        return perf_probe_api(perf_probe_sample_identifier);
@@ -182,3 +187,8 @@ bool perf_can_record_build_id(void)
 {
        return perf_probe_api(perf_probe_build_id);
 }
+
+bool perf_can_record_cgroup(void)
+{
+       return perf_probe_api(perf_probe_cgroup);
+}
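
perf_probe_api() follows a feature-probe idiom: set the attribute bit of interest on a throwaway event and see whether the kernel accepts it, since older kernels reject a perf_event_attr with unknown bits set. A hedged concept sketch using the raw syscall rather than perf's internal helpers (compiling the attr.cgroup bit assumes kernel headers from v5.7 or later):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>

/* Concept sketch, not perf's internals: open a throwaway software
 * event with the probed bit set; failure means the running kernel
 * does not support it. */
static bool kernel_accepts_cgroup_bit(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.cgroup = 1;                /* the probed attribute bit */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return false;           /* rejected: bit unsupported */
	close(fd);
	return true;
}
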
index f12ca55..b104168 100644 (file)
@@ -12,5 +12,6 @@ bool perf_can_record_switch_events(void);
 bool perf_can_record_text_poke_events(void);
 bool perf_can_sample_identifier(void);
 bool perf_can_record_build_id(void);
+bool perf_can_record_cgroup(void);
 
 #endif // __PERF_API_PROBE_H
index 866f2d5..b029c29 100644 (file)
@@ -190,6 +190,9 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
            immediate_value_is_supported()) {
                Dwarf_Sword snum;
 
+               if (!tvar)
+                       return 0;
+
                dwarf_formsdata(&attr, &snum);
                ret = asprintf(&tvar->value, "\\%ld", (long)snum);
 
index 106b3d6..e59242c 100644 (file)
@@ -1723,6 +1723,7 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
        if (event->header.size < hdr_sz || event->header.size > buf_sz)
                return -1;
 
+       buf += hdr_sz;
        rest = event->header.size - hdr_sz;
 
        if (readn(fd, buf, rest) != (ssize_t)rest)
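
The bug here: buf still pointed at the start of the buffer, so the readn() of the payload overwrote the event header that had just been read into it; advancing buf by hdr_sz first preserves the header. A hedged sketch of the corrected two-stage read (readn() is perf's read-exactly-n helper; the function name and signature are illustrative):

#include <sys/types.h>

ssize_t readn(int fd, void *buf, size_t n);  /* perf helper: read exactly n bytes */

/* Illustrative two-stage peek: the caller already read hdr_sz header
 * bytes into buf; read the remaining payload *after* them. */
static ssize_t peek_rest(int fd, char *buf, size_t hdr_sz, size_t ev_size)
{
	size_t rest = ev_size - hdr_sz;

	buf += hdr_sz;                  /* do not clobber the header */
	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	return (ssize_t)ev_size;
}
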
index a76fff5..ca326f9 100644 (file)
@@ -541,7 +541,7 @@ static void uniquify_event_name(struct evsel *counter)
        char *config;
        int ret = 0;
 
-       if (counter->uniquified_name ||
+       if (counter->uniquified_name || counter->use_config_name ||
            !counter->pmu_name || !strncmp(counter->name, counter->pmu_name,
                                           strlen(counter->pmu_name)))
                return;
@@ -555,10 +555,8 @@ static void uniquify_event_name(struct evsel *counter)
                }
        } else {
                if (perf_pmu__has_hybrid()) {
-                       if (!counter->use_config_name) {
-                               ret = asprintf(&new_name, "%s/%s/",
-                                              counter->pmu_name, counter->name);
-                       }
+                       ret = asprintf(&new_name, "%s/%s/",
+                                      counter->pmu_name, counter->name);
                } else {
                        ret = asprintf(&new_name, "%s [%s]",
                                       counter->name, counter->pmu_name);
index 4c56aa8..a733457 100644 (file)
@@ -2412,6 +2412,7 @@ int cleanup_sdt_note_list(struct list_head *sdt_notes)
 
        list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) {
                list_del_init(&pos->note_list);
+               zfree(&pos->args);
                zfree(&pos->name);
                zfree(&pos->provider);
                free(pos);
index 1512092..3a9e332 100644 (file)
@@ -1147,7 +1147,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
                }
        }
 
-       if (test->insn_processed) {
+       if (!unpriv && test->insn_processed) {
                uint32_t insn_processed;
                char *proc;
 
index ca8fdb1..7d7ebee 100644 (file)
@@ -61,6 +61,8 @@
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R1 !read_ok",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 0
 },
index 8a1caf4..e061e87 100644 (file)
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT
 },
 {
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT
 },
 {
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+       .result_unpriv = REJECT,
        .fixup_map_hash_8b = { 3 },
        .result = ACCEPT,
 },
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+       .result_unpriv = REJECT,
        .fixup_map_hash_8b = { 3 },
        .result = ACCEPT,
 },
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+       .result_unpriv = REJECT,
        .fixup_map_hash_8b = { 3 },
        .result = ACCEPT,
 },
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+       .result_unpriv = REJECT,
        .fixup_map_hash_8b = { 3 },
        .result = ACCEPT,
 },
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 min value is outside of the allowed memory range",
+       .result_unpriv = REJECT,
        .fixup_map_hash_8b = { 3 },
        .result = ACCEPT,
 },
index 17fe33a..2c8935b 100644 (file)
@@ -8,6 +8,8 @@
        BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, -4),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R9 !read_ok",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 7,
 },
index bd5cae4..1c857b2 100644 (file)
@@ -87,6 +87,8 @@
        BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R9 !read_ok",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
 },
 {
        BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R9 !read_ok",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
 },
 {
        BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R9 !read_ok",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
 },
 {
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
 },
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
 },
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
 },
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
 },
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
 },
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
 },
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
 },
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 2,
 },
index 8dcd4e0..11fc68d 100644 (file)
@@ -82,8 +82,8 @@
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
-       .retval_unpriv = 1,
-       .result_unpriv = ACCEPT,
+       .errstr_unpriv = "R9 !read_ok",
+       .result_unpriv = REJECT,
        .retval = 1,
        .result = ACCEPT,
 },
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
-       .result_unpriv = ACCEPT,
+       .errstr_unpriv = "R9 !read_ok",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
 },
 {
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
-       .result_unpriv = ACCEPT,
+       .errstr_unpriv = "R9 !read_ok",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
 },
index bd436df..111801a 100644 (file)
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R7 invalid mem access 'inv'",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 0,
 },
index 7ae2859..a3e593d 100644 (file)
        .fixup_map_array_48b = { 1 },
        .result = ACCEPT,
        .result_unpriv = REJECT,
-       .errstr_unpriv = "R2 tried to add from different maps, paths or scalars",
+       .errstr_unpriv = "R2 pointer comparison prohibited",
        .retval = 0,
 },
 {
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        // fake-dead code; targeted from branch A to
-       // prevent dead code sanitization
+       // prevent dead code sanitization; it is,
+       // however, rejected via branch B
        BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        .fixup_map_array_48b = { 1 },
        .result = ACCEPT,
        .result_unpriv = REJECT,
-       .errstr_unpriv = "R2 tried to add from different maps, paths or scalars",
+       .errstr_unpriv = "R0 invalid mem access 'inv'",
        .retval = 0,
 },
 {
index bd83158..524c857 100644 (file)
@@ -41,5 +41,6 @@
 /kvm_create_max_vcpus
 /kvm_page_table_test
 /memslot_modification_stress_test
+/memslot_perf_test
 /set_memory_region_test
 /steal_time
index e439d02..daaee18 100644 (file)
@@ -33,7 +33,7 @@ ifeq ($(ARCH),s390)
        UNAME_M := s390x
 endif
 
-LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c
+LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/rbtree.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c
 LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S
 LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
 LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c
@@ -74,6 +74,7 @@ TEST_GEN_PROGS_x86_64 += hardware_disable_test
 TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
 TEST_GEN_PROGS_x86_64 += kvm_page_table_test
 TEST_GEN_PROGS_x86_64 += memslot_modification_stress_test
+TEST_GEN_PROGS_x86_64 += memslot_perf_test
 TEST_GEN_PROGS_x86_64 += set_memory_region_test
 TEST_GEN_PROGS_x86_64 += steal_time
 
index 5f7a229..b747043 100644 (file)
@@ -9,6 +9,7 @@
 
 #define _GNU_SOURCE /* for pipe2 */
 
+#include <inttypes.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <time.h>
@@ -38,6 +39,7 @@
 
 static int nr_vcpus = 1;
 static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
+static size_t demand_paging_size;
 static char *guest_data_prototype;
 
 static void *vcpu_worker(void *data)
@@ -71,36 +73,51 @@ static void *vcpu_worker(void *data)
        return NULL;
 }
 
-static int handle_uffd_page_request(int uffd, uint64_t addr)
+static int handle_uffd_page_request(int uffd_mode, int uffd, uint64_t addr)
 {
-       pid_t tid;
+       pid_t tid = syscall(__NR_gettid);
        struct timespec start;
        struct timespec ts_diff;
-       struct uffdio_copy copy;
        int r;
 
-       tid = syscall(__NR_gettid);
+       clock_gettime(CLOCK_MONOTONIC, &start);
 
-       copy.src = (uint64_t)guest_data_prototype;
-       copy.dst = addr;
-       copy.len = perf_test_args.host_page_size;
-       copy.mode = 0;
+       if (uffd_mode == UFFDIO_REGISTER_MODE_MISSING) {
+               struct uffdio_copy copy;
 
-       clock_gettime(CLOCK_MONOTONIC, &start);
+               copy.src = (uint64_t)guest_data_prototype;
+               copy.dst = addr;
+               copy.len = demand_paging_size;
+               copy.mode = 0;
 
-       r = ioctl(uffd, UFFDIO_COPY, &copy);
-       if (r == -1) {
-               pr_info("Failed Paged in 0x%lx from thread %d with errno: %d\n",
-                       addr, tid, errno);
-               return r;
+               r = ioctl(uffd, UFFDIO_COPY, &copy);
+               if (r == -1) {
+                       pr_info("Failed UFFDIO_COPY in 0x%lx from thread %d with errno: %d\n",
+                               addr, tid, errno);
+                       return r;
+               }
+       } else if (uffd_mode == UFFDIO_REGISTER_MODE_MINOR) {
+               struct uffdio_continue cont = {0};
+
+               cont.range.start = addr;
+               cont.range.len = demand_paging_size;
+
+               r = ioctl(uffd, UFFDIO_CONTINUE, &cont);
+               if (r == -1) {
+                       pr_info("Failed UFFDIO_CONTINUE in 0x%lx from thread %d with errno: %d\n",
+                               addr, tid, errno);
+                       return r;
+               }
+       } else {
+               TEST_FAIL("Invalid uffd mode %d", uffd_mode);
        }
 
        ts_diff = timespec_elapsed(start);
 
-       PER_PAGE_DEBUG("UFFDIO_COPY %d \t%ld ns\n", tid,
+       PER_PAGE_DEBUG("UFFD page-in %d \t%ld ns\n", tid,
                       timespec_to_ns(ts_diff));
        PER_PAGE_DEBUG("Paged in %ld bytes at 0x%lx from thread %d\n",
-                      perf_test_args.host_page_size, addr, tid);
+                      demand_paging_size, addr, tid);
 
        return 0;
 }
@@ -108,6 +125,7 @@ static int handle_uffd_page_request(int uffd, uint64_t addr)
 bool quit_uffd_thread;
 
 struct uffd_handler_args {
+       int uffd_mode;
        int uffd;
        int pipefd;
        useconds_t delay;
@@ -169,7 +187,7 @@ static void *uffd_handler_thread_fn(void *arg)
                if (r == -1) {
                        if (errno == EAGAIN)
                                continue;
-                       pr_info("Read of uffd gor errno %d", errno);
+                       pr_info("Read of uffd got errno %d\n", errno);
                        return NULL;
                }
 
@@ -184,7 +202,7 @@ static void *uffd_handler_thread_fn(void *arg)
                if (delay)
                        usleep(delay);
                addr =  msg.arg.pagefault.address;
-               r = handle_uffd_page_request(uffd, addr);
+               r = handle_uffd_page_request(uffd_args->uffd_mode, uffd, addr);
                if (r < 0)
                        return NULL;
                pages++;
@@ -198,43 +216,53 @@ static void *uffd_handler_thread_fn(void *arg)
        return NULL;
 }
 
-static int setup_demand_paging(struct kvm_vm *vm,
-                              pthread_t *uffd_handler_thread, int pipefd,
-                              useconds_t uffd_delay,
-                              struct uffd_handler_args *uffd_args,
-                              void *hva, uint64_t len)
+static void setup_demand_paging(struct kvm_vm *vm,
+                               pthread_t *uffd_handler_thread, int pipefd,
+                               int uffd_mode, useconds_t uffd_delay,
+                               struct uffd_handler_args *uffd_args,
+                               void *hva, void *alias, uint64_t len)
 {
+       bool is_minor = (uffd_mode == UFFDIO_REGISTER_MODE_MINOR);
        int uffd;
        struct uffdio_api uffdio_api;
        struct uffdio_register uffdio_register;
+       uint64_t expected_ioctls = ((uint64_t) 1) << _UFFDIO_COPY;
 
-       uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
-       if (uffd == -1) {
-               pr_info("uffd creation failed\n");
-               return -1;
+       PER_PAGE_DEBUG("Userfaultfd %s mode, faults resolved with %s\n",
+                      is_minor ? "MINOR" : "MISSING",
+                      is_minor ? "UFFDIO_CONTINUE" : "UFFDIO_COPY");
+
+       /* In order to get minor faults, prefault via the alias. */
+       if (is_minor) {
+               size_t p;
+
+               expected_ioctls = ((uint64_t) 1) << _UFFDIO_CONTINUE;
+
+               TEST_ASSERT(alias != NULL, "Alias required for minor faults");
+               for (p = 0; p < (len / demand_paging_size); ++p) {
+                       memcpy(alias + (p * demand_paging_size),
+                              guest_data_prototype, demand_paging_size);
+               }
        }
 
+       uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
+       TEST_ASSERT(uffd >= 0, "uffd creation failed, errno: %d", errno);
+
        uffdio_api.api = UFFD_API;
        uffdio_api.features = 0;
-       if (ioctl(uffd, UFFDIO_API, &uffdio_api) == -1) {
-               pr_info("ioctl uffdio_api failed\n");
-               return -1;
-       }
+       TEST_ASSERT(ioctl(uffd, UFFDIO_API, &uffdio_api) != -1,
+                   "ioctl UFFDIO_API failed: %" PRIu64,
+                   (uint64_t)uffdio_api.api);
 
        uffdio_register.range.start = (uint64_t)hva;
        uffdio_register.range.len = len;
-       uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
-       if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) == -1) {
-               pr_info("ioctl uffdio_register failed\n");
-               return -1;
-       }
-
-       if ((uffdio_register.ioctls & UFFD_API_RANGE_IOCTLS) !=
-                       UFFD_API_RANGE_IOCTLS) {
-               pr_info("unexpected userfaultfd ioctl set\n");
-               return -1;
-       }
+       uffdio_register.mode = uffd_mode;
+       TEST_ASSERT(ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) != -1,
+                   "ioctl UFFDIO_REGISTER failed");
+       TEST_ASSERT((uffdio_register.ioctls & expected_ioctls) ==
+                   expected_ioctls, "missing userfaultfd ioctls");
 
+       uffd_args->uffd_mode = uffd_mode;
        uffd_args->uffd = uffd;
        uffd_args->pipefd = pipefd;
        uffd_args->delay = uffd_delay;
@@ -243,13 +271,12 @@ static int setup_demand_paging(struct kvm_vm *vm,
 
        PER_VCPU_DEBUG("Created uffd thread for HVA range [%p, %p)\n",
                       hva, hva + len);
-
-       return 0;
 }
 
 struct test_params {
-       bool use_uffd;
+       int uffd_mode;
        useconds_t uffd_delay;
+       enum vm_mem_backing_src_type src_type;
        bool partition_vcpu_memory_access;
 };
 
@@ -267,14 +294,16 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        int r;
 
        vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
-                                VM_MEM_SRC_ANONYMOUS);
+                                p->src_type);
 
        perf_test_args.wr_fract = 1;
 
-       guest_data_prototype = malloc(perf_test_args.host_page_size);
+       demand_paging_size = get_backing_src_pagesz(p->src_type);
+
+       guest_data_prototype = malloc(demand_paging_size);
        TEST_ASSERT(guest_data_prototype,
                    "Failed to allocate buffer for guest data pattern");
-       memset(guest_data_prototype, 0xAB, perf_test_args.host_page_size);
+       memset(guest_data_prototype, 0xAB, demand_paging_size);
 
        vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
        TEST_ASSERT(vcpu_threads, "Memory allocation failed");
@@ -282,7 +311,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
                              p->partition_vcpu_memory_access);
 
-       if (p->use_uffd) {
+       if (p->uffd_mode) {
                uffd_handler_threads =
                        malloc(nr_vcpus * sizeof(*uffd_handler_threads));
                TEST_ASSERT(uffd_handler_threads, "Memory allocation failed");
@@ -296,6 +325,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
                        vm_paddr_t vcpu_gpa;
                        void *vcpu_hva;
+                       void *vcpu_alias;
                        uint64_t vcpu_mem_size;
 
 
@@ -310,8 +340,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                        PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n",
                                       vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_mem_size);
 
-                       /* Cache the HVA pointer of the region */
+                       /* Cache the host addresses of the region */
                        vcpu_hva = addr_gpa2hva(vm, vcpu_gpa);
+                       vcpu_alias = addr_gpa2alias(vm, vcpu_gpa);
 
                        /*
                         * Set up user fault fd to handle demand paging
@@ -321,13 +352,11 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                                  O_CLOEXEC | O_NONBLOCK);
                        TEST_ASSERT(!r, "Failed to set up pipefd");
 
-                       r = setup_demand_paging(vm,
-                                               &uffd_handler_threads[vcpu_id],
-                                               pipefds[vcpu_id * 2],
-                                               p->uffd_delay, &uffd_args[vcpu_id],
-                                               vcpu_hva, vcpu_mem_size);
-                       if (r < 0)
-                               exit(-r);
+                       setup_demand_paging(vm, &uffd_handler_threads[vcpu_id],
+                                           pipefds[vcpu_id * 2], p->uffd_mode,
+                                           p->uffd_delay, &uffd_args[vcpu_id],
+                                           vcpu_hva, vcpu_alias,
+                                           vcpu_mem_size);
                }
        }
 
@@ -355,7 +384,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
        pr_info("All vCPU threads joined\n");
 
-       if (p->use_uffd) {
+       if (p->uffd_mode) {
                char c;
 
                /* Tell the user fault fd handler threads to quit */
@@ -377,7 +406,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
        free(guest_data_prototype);
        free(vcpu_threads);
-       if (p->use_uffd) {
+       if (p->uffd_mode) {
                free(uffd_handler_threads);
                free(uffd_args);
                free(pipefds);
@@ -387,17 +416,19 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 static void help(char *name)
 {
        puts("");
-       printf("usage: %s [-h] [-m mode] [-u] [-d uffd_delay_usec]\n"
-              "          [-b memory] [-v vcpus] [-o]\n", name);
+       printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-d uffd_delay_usec]\n"
+              "          [-b memory] [-t type] [-v vcpus] [-o]\n", name);
        guest_modes_help();
-       printf(" -u: use User Fault FD to handle vCPU page\n"
-              "     faults.\n");
+       printf(" -u: use userfaultfd to handle vCPU page faults. Mode is a\n"
+              "     UFFD registration mode: 'MISSING' or 'MINOR'.\n");
        printf(" -d: add a delay in usec to the User Fault\n"
               "     FD handler to simulate demand paging\n"
               "     overheads. Ignored without -u.\n");
        printf(" -b: specify the size of the memory region which should be\n"
               "     demand paged by each vCPU. e.g. 10M or 3G.\n"
               "     Default: 1G\n");
+       printf(" -t: The type of backing memory to use. Default: anonymous\n");
+       backing_src_help();
        printf(" -v: specify the number of vCPUs to run.\n");
        printf(" -o: Overlap guest memory accesses instead of partitioning\n"
               "     them into a separate region of memory for each vCPU.\n");
@@ -409,19 +440,24 @@ int main(int argc, char *argv[])
 {
        int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
        struct test_params p = {
+               .src_type = VM_MEM_SRC_ANONYMOUS,
                .partition_vcpu_memory_access = true,
        };
        int opt;
 
        guest_modes_append_default();
 
-       while ((opt = getopt(argc, argv, "hm:ud:b:v:o")) != -1) {
+       while ((opt = getopt(argc, argv, "hm:u:d:b:t:v:o")) != -1) {
                switch (opt) {
                case 'm':
                        guest_modes_cmdline(optarg);
                        break;
                case 'u':
-                       p.use_uffd = true;
+                       if (!strcmp("MISSING", optarg))
+                               p.uffd_mode = UFFDIO_REGISTER_MODE_MISSING;
+                       else if (!strcmp("MINOR", optarg))
+                               p.uffd_mode = UFFDIO_REGISTER_MODE_MINOR;
+                       TEST_ASSERT(p.uffd_mode, "UFFD mode must be 'MISSING' or 'MINOR'.");
                        break;
                case 'd':
                        p.uffd_delay = strtoul(optarg, NULL, 0);
@@ -430,6 +466,9 @@ int main(int argc, char *argv[])
                case 'b':
                        guest_percpu_mem_size = parse_size(optarg);
                        break;
+               case 't':
+                       p.src_type = parse_backing_src_type(optarg);
+                       break;
                case 'v':
                        nr_vcpus = atoi(optarg);
                        TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
@@ -445,6 +484,11 @@ int main(int argc, char *argv[])
                }
        }
 
+       if (p.uffd_mode == UFFDIO_REGISTER_MODE_MINOR &&
+           !backing_src_is_shared(p.src_type)) {
+               TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -t");
+       }
+
        for_each_guest_mode(run_test, &p);
 
        return 0;
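
With -u the test now resolves faults one of two ways: MISSING mode injects page contents with UFFDIO_COPY, while MINOR mode, where the data is already in the page cache after prefaulting through the shared alias, only installs the mapping with UFFDIO_CONTINUE. A hedged minimal sketch of the two resolution paths, assuming an already-registered uffd and a backing-source-aligned address:

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <stdint.h>

/* Hedged sketch, assuming 'uffd' is registered over the faulting range
 * and 'src' holds one backing page of data; mirrors the test's two
 * resolution paths without its timing and bookkeeping. */
static int resolve_fault(int mode, int uffd, uint64_t addr,
			 void *src, uint64_t pagesz)
{
	if (mode == UFFDIO_REGISTER_MODE_MISSING) {
		struct uffdio_copy copy = {
			.src = (uint64_t)src,
			.dst = addr,
			.len = pagesz,
		};

		return ioctl(uffd, UFFDIO_COPY, &copy);   /* inject data */
	}

	/* MINOR: data already in the page cache, just map it */
	struct uffdio_continue cont = {
		.range = { .start = addr, .len = pagesz },
	};

	return ioctl(uffd, UFFDIO_CONTINUE, &cont);
}
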
index 5aadf84..4b8db3b 100644 (file)
@@ -132,6 +132,36 @@ static void run_test(uint32_t run)
        TEST_ASSERT(false, "%s: [%d] child escaped the ninja\n", __func__, run);
 }
 
+void wait_for_child_setup(pid_t pid)
+{
+       /*
+        * Wait for the child to post to the semaphore, but wake up periodically
+        * to check if the child exited prematurely.
+        */
+       for (;;) {
+               const struct timespec wait_period = { .tv_sec = 1 };
+               int status;
+
+               if (!sem_timedwait(sem, &wait_period))
+                       return;
+
+               /* Child is still running, keep waiting. */
+               if (pid != waitpid(pid, &status, WNOHANG))
+                       continue;
+
+               /*
+                * Child is no longer running, which is not expected.
+                *
+                * If it exited with a non-zero status, we explicitly forward
+                * the child's status in case it exited with KSFT_SKIP.
+                */
+               if (WIFEXITED(status))
+                       exit(WEXITSTATUS(status));
+               else
+                       TEST_ASSERT(false, "Child exited unexpectedly");
+       }
+}
+
 int main(int argc, char **argv)
 {
        uint32_t i;
@@ -148,7 +178,7 @@ int main(int argc, char **argv)
                        run_test(i); /* This function always exits */
 
                pr_debug("%s: [%d] waiting semaphore\n", __func__, i);
-               sem_wait(sem);
+               wait_for_child_setup(pid);
                r = (rand() % DELAY_US_MAX) + 1;
                pr_debug("%s: [%d] waiting %dus\n", __func__, i, r);
                usleep(r);
index a8f0227..3573956 100644 (file)
@@ -43,6 +43,7 @@ enum vm_guest_mode {
        VM_MODE_P40V48_4K,
        VM_MODE_P40V48_64K,
        VM_MODE_PXXV48_4K,      /* For 48bits VA but ANY bits PA */
+       VM_MODE_P47V64_4K,
        NUM_VM_MODES,
 };
 
@@ -60,7 +61,7 @@ enum vm_guest_mode {
 
 #elif defined(__s390x__)
 
-#define VM_MODE_DEFAULT                        VM_MODE_P52V48_4K
+#define VM_MODE_DEFAULT                        VM_MODE_P47V64_4K
 #define MIN_PAGE_SHIFT                 12U
 #define ptes_per_page(page_size)       ((page_size) / 16)
 
@@ -77,6 +78,7 @@ struct vm_guest_mode_params {
 };
 extern const struct vm_guest_mode_params vm_guest_mode_params[];
 
+int open_kvm_dev_path_or_exit(void);
 int kvm_check_cap(long cap);
 int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
 int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
@@ -146,6 +148,7 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
+void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
 
 /*
  * Address Guest Virtual to Guest Physical
@@ -283,10 +286,11 @@ struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_me
                                            uint32_t num_percpu_pages, void *guest_code,
                                            uint32_t vcpuids[]);
 
-/* Like vm_create_default_with_vcpus, but accepts mode as a parameter */
+/* Like vm_create_default_with_vcpus, but accepts mode and slot0 memory as parameters */
 struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
-                                   uint64_t extra_mem_pages, uint32_t num_percpu_pages,
-                                   void *guest_code, uint32_t vcpuids[]);
+                                   uint64_t slot0_mem_pages, uint64_t extra_mem_pages,
+                                   uint32_t num_percpu_pages, void *guest_code,
+                                   uint32_t vcpuids[]);
 
 /*
  * Adds a vCPU with reasonable defaults (e.g. a stack)
@@ -302,7 +306,7 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm);
 
 unsigned int vm_get_page_size(struct kvm_vm *vm);
 unsigned int vm_get_page_shift(struct kvm_vm *vm);
-unsigned int vm_get_max_gfn(struct kvm_vm *vm);
+uint64_t vm_get_max_gfn(struct kvm_vm *vm);
 int vm_get_fd(struct kvm_vm *vm);
 
 unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
index fade313..d79be15 100644 (file)
@@ -17,6 +17,7 @@
 #include <errno.h>
 #include <unistd.h>
 #include <fcntl.h>
+#include <sys/mman.h>
 #include "kselftest.h"
 
 static inline int _no_printf(const char *format, ...) { return 0; }
@@ -84,6 +85,8 @@ enum vm_mem_backing_src_type {
        VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB,
        VM_MEM_SRC_ANONYMOUS_HUGETLB_2GB,
        VM_MEM_SRC_ANONYMOUS_HUGETLB_16GB,
+       VM_MEM_SRC_SHMEM,
+       VM_MEM_SRC_SHARED_HUGETLB,
        NUM_SRC_TYPES,
 };
 
@@ -100,4 +103,13 @@ size_t get_backing_src_pagesz(uint32_t i);
 void backing_src_help(void);
 enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);
 
+/*
+ * Whether or not the given source type is shared memory (as opposed to
+ * anonymous).
+ */
+static inline bool backing_src_is_shared(enum vm_mem_backing_src_type t)
+{
+       return vm_mem_backing_src_alias(t)->flag & MAP_SHARED;
+}
+
 #endif /* SELFTEST_KVM_TEST_UTIL_H */
index 1c4753f..82171f1 100644 (file)
@@ -268,7 +268,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
 
        /* Create a VM with enough guest pages */
        guest_num_pages = test_mem_size / guest_page_size;
-       vm = vm_create_with_vcpus(mode, nr_vcpus,
+       vm = vm_create_with_vcpus(mode, nr_vcpus, DEFAULT_GUEST_PHY_PAGES,
                                  guest_num_pages, 0, guest_code, NULL);
 
        /* Align down GPA of the testing memslot */
index fc83f6c..a2b732c 100644 (file)
@@ -32,6 +32,34 @@ static void *align(void *x, size_t size)
 }
 
 /*
+ * Open KVM_DEV_PATH if available, otherwise exit the entire program.
+ *
+ * Input Args:
+ *   flags - The flags to pass when opening KVM_DEV_PATH.
+ *
+ * Return:
+ *   The opened file descriptor of /dev/kvm.
+ */
+static int _open_kvm_dev_path_or_exit(int flags)
+{
+       int fd;
+
+       fd = open(KVM_DEV_PATH, flags);
+       if (fd < 0) {
+               print_skip("%s not available, is KVM loaded? (errno: %d)",
+                          KVM_DEV_PATH, errno);
+               exit(KSFT_SKIP);
+       }
+
+       return fd;
+}
+
+int open_kvm_dev_path_or_exit(void)
+{
+       return _open_kvm_dev_path_or_exit(O_RDONLY);
+}
+
+/*
  * Capability
  *
  * Input Args:
@@ -52,12 +80,9 @@ int kvm_check_cap(long cap)
        int ret;
        int kvm_fd;
 
-       kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
-       if (kvm_fd < 0)
-               exit(KSFT_SKIP);
-
+       kvm_fd = open_kvm_dev_path_or_exit();
        ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
-       TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
+       TEST_ASSERT(ret >= 0, "KVM_CHECK_EXTENSION IOCTL failed,\n"
                "  rc: %i errno: %i", ret, errno);
 
        close(kvm_fd);
@@ -128,9 +153,7 @@ void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
 
 static void vm_open(struct kvm_vm *vm, int perm)
 {
-       vm->kvm_fd = open(KVM_DEV_PATH, perm);
-       if (vm->kvm_fd < 0)
-               exit(KSFT_SKIP);
+       vm->kvm_fd = _open_kvm_dev_path_or_exit(perm);
 
        if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
                print_skip("immediate_exit not available");
@@ -152,6 +175,7 @@ const char *vm_guest_mode_string(uint32_t i)
                [VM_MODE_P40V48_4K]     = "PA-bits:40,  VA-bits:48,  4K pages",
                [VM_MODE_P40V48_64K]    = "PA-bits:40,  VA-bits:48, 64K pages",
                [VM_MODE_PXXV48_4K]     = "PA-bits:ANY, VA-bits:48,  4K pages",
+               [VM_MODE_P47V64_4K]     = "PA-bits:47,  VA-bits:64,  4K pages",
        };
        _Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
                       "Missing new mode strings?");
@@ -169,6 +193,7 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
        { 40, 48,  0x1000, 12 },
        { 40, 48, 0x10000, 16 },
        {  0,  0,  0x1000, 12 },
+       { 47, 64,  0x1000, 12 },
 };
 _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
               "Missing new mode params?");
@@ -203,7 +228,9 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
        TEST_ASSERT(vm != NULL, "Insufficient Memory");
 
        INIT_LIST_HEAD(&vm->vcpus);
-       INIT_LIST_HEAD(&vm->userspace_mem_regions);
+       vm->regions.gpa_tree = RB_ROOT;
+       vm->regions.hva_tree = RB_ROOT;
+       hash_init(vm->regions.slot_hash);
 
        vm->mode = mode;
        vm->type = 0;
@@ -252,6 +279,9 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
                TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
 #endif
                break;
+       case VM_MODE_P47V64_4K:
+               vm->pgtable_levels = 5;
+               break;
        default:
                TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
        }
@@ -283,21 +313,50 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
        return vm;
 }
 
+/*
+ * VM Create with customized parameters
+ *
+ * Input Args:
+ *   mode - VM Mode (e.g. VM_MODE_P52V48_4K)
+ *   nr_vcpus - VCPU count
+ *   slot0_mem_pages - Slot0 physical memory size
+ *   extra_mem_pages - Non-slot0 physical memory total size
+ *   num_percpu_pages - Per-cpu physical memory pages
+ *   guest_code - Guest entry point
+ *   vcpuids - VCPU IDs
+ *
+ * Output Args: None
+ *
+ * Return:
+ *   Pointer to opaque structure that describes the created VM.
+ *
+ * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K),
+ * with a customized slot0 memory size (currently at least 512 pages).
+ * extra_mem_pages is used only to calculate the maximum page table size;
+ * this function performs no real memory allocation for non-slot0 memory.
+ */
 struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
-                                   uint64_t extra_mem_pages, uint32_t num_percpu_pages,
-                                   void *guest_code, uint32_t vcpuids[])
+                                   uint64_t slot0_mem_pages, uint64_t extra_mem_pages,
+                                   uint32_t num_percpu_pages, void *guest_code,
+                                   uint32_t vcpuids[])
 {
+       uint64_t vcpu_pages, extra_pg_pages, pages;
+       struct kvm_vm *vm;
+       int i;
+
+       /* Force slot0 memory size to be no smaller than DEFAULT_GUEST_PHY_PAGES */
+       if (slot0_mem_pages < DEFAULT_GUEST_PHY_PAGES)
+               slot0_mem_pages = DEFAULT_GUEST_PHY_PAGES;
+
        /* The maximum page table size for a memory region will be when the
         * smallest pages are used. Considering each page contains x page
         * table descriptors, the total extra size for page tables (for extra
         * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
         * than N/x*2.
         */
-       uint64_t vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
-       uint64_t extra_pg_pages = (extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
-       uint64_t pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages;
-       struct kvm_vm *vm;
-       int i;
+       vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
+       extra_pg_pages = (slot0_mem_pages + extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
+       pages = slot0_mem_pages + vcpu_pages + extra_pg_pages;
 
        TEST_ASSERT(nr_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
                    "nr_vcpus = %d too large for host, max-vcpus = %d",
@@ -329,8 +388,8 @@ struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_me
                                            uint32_t num_percpu_pages, void *guest_code,
                                            uint32_t vcpuids[])
 {
-       return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, extra_mem_pages,
-                                   num_percpu_pages, guest_code, vcpuids);
+       return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, DEFAULT_GUEST_PHY_PAGES,
+                                   extra_mem_pages, num_percpu_pages, guest_code, vcpuids);
 }
 
 struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
@@ -355,13 +414,14 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
  */
 void kvm_vm_restart(struct kvm_vm *vmp, int perm)
 {
+       int ctr;
        struct userspace_mem_region *region;
 
        vm_open(vmp, perm);
        if (vmp->has_irqchip)
                vm_create_irqchip(vmp);
 
-       list_for_each_entry(region, &vmp->userspace_mem_regions, list) {
+       hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) {
                int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
                TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
                            "  rc: %i errno: %i\n"
@@ -424,14 +484,21 @@ uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
 static struct userspace_mem_region *
 userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
 {
-       struct userspace_mem_region *region;
+       struct rb_node *node;
 
-       list_for_each_entry(region, &vm->userspace_mem_regions, list) {
+       for (node = vm->regions.gpa_tree.rb_node; node; ) {
+               struct userspace_mem_region *region =
+                       container_of(node, struct userspace_mem_region, gpa_node);
                uint64_t existing_start = region->region.guest_phys_addr;
                uint64_t existing_end = region->region.guest_phys_addr
                        + region->region.memory_size - 1;
                if (start <= existing_end && end >= existing_start)
                        return region;
+
+               if (start < existing_start)
+                       node = node->rb_left;
+               else
+                       node = node->rb_right;
        }
 
        return NULL;
@@ -546,11 +613,16 @@ void kvm_vm_release(struct kvm_vm *vmp)
 }
 
 static void __vm_mem_region_delete(struct kvm_vm *vm,
-                                  struct userspace_mem_region *region)
+                                  struct userspace_mem_region *region,
+                                  bool unlink)
 {
        int ret;
 
-       list_del(&region->list);
+       if (unlink) {
+               rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
+               rb_erase(&region->hva_node, &vm->regions.hva_tree);
+               hash_del(&region->slot_node);
+       }
 
        region->region.memory_size = 0;
        ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
@@ -569,14 +641,16 @@ static void __vm_mem_region_delete(struct kvm_vm *vm,
  */
 void kvm_vm_free(struct kvm_vm *vmp)
 {
-       struct userspace_mem_region *region, *tmp;
+       int ctr;
+       struct hlist_node *node;
+       struct userspace_mem_region *region;
 
        if (vmp == NULL)
                return;
 
        /* Free userspace_mem_regions. */
-       list_for_each_entry_safe(region, tmp, &vmp->userspace_mem_regions, list)
-               __vm_mem_region_delete(vmp, region);
+       hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
+               __vm_mem_region_delete(vmp, region, false);
 
        /* Free sparsebit arrays. */
        sparsebit_free(&vmp->vpages_valid);
@@ -658,13 +732,64 @@ int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
        return 0;
 }
 
+static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree,
+                                              struct userspace_mem_region *region)
+{
+       struct rb_node **cur, *parent;
+
+       for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) {
+               struct userspace_mem_region *cregion;
+
+               cregion = container_of(*cur, typeof(*cregion), gpa_node);
+               parent = *cur;
+               if (region->region.guest_phys_addr <
+                   cregion->region.guest_phys_addr)
+                       cur = &(*cur)->rb_left;
+               else {
+                       TEST_ASSERT(region->region.guest_phys_addr !=
+                                   cregion->region.guest_phys_addr,
+                                   "Duplicate GPA in region tree");
+
+                       cur = &(*cur)->rb_right;
+               }
+       }
+
+       rb_link_node(&region->gpa_node, parent, cur);
+       rb_insert_color(&region->gpa_node, gpa_tree);
+}
+
+static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree,
+                                              struct userspace_mem_region *region)
+{
+       struct rb_node **cur, *parent;
+
+       for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) {
+               struct userspace_mem_region *cregion;
+
+               cregion = container_of(*cur, typeof(*cregion), hva_node);
+               parent = *cur;
+               if (region->host_mem < cregion->host_mem)
+                       cur = &(*cur)->rb_left;
+               else {
+                       TEST_ASSERT(region->host_mem !=
+                                   cregion->host_mem,
+                                   "Duplicate HVA in region tree");
+
+                       cur = &(*cur)->rb_right;
+               }
+       }
+
+       rb_link_node(&region->hva_node, parent, cur);
+       rb_insert_color(&region->hva_node, hva_tree);
+}
+
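
Both insert helpers use the stock kernel rb-tree idiom: descend to the NULL child slot that preserves ordering while tracking the parent, then call rb_link_node() plus rb_insert_color() to rebalance. As a hedged companion sketch (not in the patch), an in-order walk of the resulting GPA tree with the standard rb_first()/rb_next() walkers:

#include <stdio.h>
#include "linux/rbtree.h"

/* Companion sketch: iterate the GPA tree in ascending guest-physical
 * order; assumes the selftest's userspace_mem_region definition and
 * container_of() from the shared headers. */
static void dump_gpa_tree(struct rb_root *gpa_tree)
{
	struct rb_node *node;

	for (node = rb_first(gpa_tree); node; node = rb_next(node)) {
		struct userspace_mem_region *r =
			container_of(node, struct userspace_mem_region, gpa_node);

		printf("slot %u: gpa 0x%llx\n", r->region.slot,
		       (unsigned long long)r->region.guest_phys_addr);
	}
}
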
 /*
  * VM Userspace Memory Region Add
  *
  * Input Args:
  *   vm - Virtual Machine
- *   backing_src - Storage source for this region.
- *                 NULL to use anonymous memory.
+ *   src_type - Storage source for this region.
+ *              VM_MEM_SRC_ANONYMOUS to use anonymous memory.
  *   guest_paddr - Starting guest physical address
  *   slot - KVM region slot
  *   npages - Number of physical pages
@@ -722,7 +847,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
                        (uint64_t) region->region.memory_size);
 
        /* Confirm no region with the requested slot already exists. */
-       list_for_each_entry(region, &vm->userspace_mem_regions, list) {
+       hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
+                              slot) {
                if (region->region.slot != slot)
                        continue;
 
@@ -755,11 +881,30 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
        if (alignment > 1)
                region->mmap_size += alignment;
 
+       region->fd = -1;
+       if (backing_src_is_shared(src_type)) {
+               int memfd_flags = MFD_CLOEXEC;
+
+               if (src_type == VM_MEM_SRC_SHARED_HUGETLB)
+                       memfd_flags |= MFD_HUGETLB;
+
+               region->fd = memfd_create("kvm_selftest", memfd_flags);
+               TEST_ASSERT(region->fd != -1,
+                           "memfd_create failed, errno: %i", errno);
+
+               ret = ftruncate(region->fd, region->mmap_size);
+               TEST_ASSERT(ret == 0, "ftruncate failed, errno: %i", errno);
+
+               ret = fallocate(region->fd,
+                               FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0,
+                               region->mmap_size);
+               TEST_ASSERT(ret == 0, "fallocate failed, errno: %i", errno);
+       }
+
        region->mmap_start = mmap(NULL, region->mmap_size,
                                  PROT_READ | PROT_WRITE,
-                                 MAP_PRIVATE | MAP_ANONYMOUS
-                                 | vm_mem_backing_src_alias(src_type)->flag,
-                                 -1, 0);
+                                 vm_mem_backing_src_alias(src_type)->flag,
+                                 region->fd, 0);
        TEST_ASSERT(region->mmap_start != MAP_FAILED,
                    "test_malloc failed, mmap_start: %p errno: %i",
                    region->mmap_start, errno);
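
Shared backing sources are now built on a memfd: create it, size it with ftruncate(), punch it clean with fallocate(), then mmap() the fd MAP_SHARED; the alias created further below is simply a second MAP_SHARED mapping of the same fd. A hedged standalone sketch of that double mapping (error handling and alignment trimmed; memfd_create() assumes _GNU_SOURCE and glibc 2.27+):

#define _GNU_SOURCE
#include <sys/mman.h>
#include <unistd.h>
#include <string.h>

int main(void)
{
	size_t size = 2 * 1024 * 1024;
	int fd = memfd_create("kvm_selftest", MFD_CLOEXEC);

	ftruncate(fd, size);

	/* Two MAP_SHARED mappings of one fd see the same pages, so a
	 * write through 'alias' is visible through 'mem' and vice
	 * versa -- the basis of the host alias for guest memory. */
	char *mem   = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	char *alias = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	memset(alias, 0xAB, size);              /* populate via the alias ... */
	return mem[0] == (char)0xAB ? 0 : 1;    /* ... observe via the region */
}
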
@@ -793,8 +938,23 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
                ret, errno, slot, flags,
                guest_paddr, (uint64_t) region->region.memory_size);
 
-       /* Add to linked-list of memory regions. */
-       list_add(&region->list, &vm->userspace_mem_regions);
+       /* Add to quick lookup data structures */
+       vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region);
+       vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region);
+       hash_add(vm->regions.slot_hash, &region->slot_node, slot);
+
+       /* If shared memory, create an alias. */
+       if (region->fd >= 0) {
+               region->mmap_alias = mmap(NULL, region->mmap_size,
+                                         PROT_READ | PROT_WRITE,
+                                         vm_mem_backing_src_alias(src_type)->flag,
+                                         region->fd, 0);
+               TEST_ASSERT(region->mmap_alias != MAP_FAILED,
+                           "mmap of alias failed, errno: %i", errno);
+
+               /* Align host alias address */
+               region->host_alias = align(region->mmap_alias, alignment);
+       }
 }
 
 /*
@@ -817,10 +977,10 @@ memslot2region(struct kvm_vm *vm, uint32_t memslot)
 {
        struct userspace_mem_region *region;
 
-       list_for_each_entry(region, &vm->userspace_mem_regions, list) {
+       hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
+                              memslot)
                if (region->region.slot == memslot)
                        return region;
-       }
 
        fprintf(stderr, "No mem region with the requested slot found,\n"
                "  requested slot: %u\n", memslot);
@@ -905,7 +1065,7 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
  */
 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
 {
-       __vm_mem_region_delete(vm, memslot2region(vm, slot));
+       __vm_mem_region_delete(vm, memslot2region(vm, slot), true);
 }
 
 /*
@@ -925,9 +1085,7 @@ static int vcpu_mmap_sz(void)
 {
        int dev_fd, ret;
 
-       dev_fd = open(KVM_DEV_PATH, O_RDONLY);
-       if (dev_fd < 0)
-               exit(KSFT_SKIP);
+       dev_fd = open_kvm_dev_path_or_exit();
 
        ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
        TEST_ASSERT(ret >= sizeof(struct kvm_run),
@@ -1099,6 +1257,9 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
        uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
 
        virt_pgd_alloc(vm, pgd_memslot);
+       vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages,
+                                             KVM_UTIL_MIN_PFN * vm->page_size,
+                                             data_memslot);
 
        /*
         * Find an unused range of virtual page addresses of at least
@@ -1108,11 +1269,7 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
 
        /* Map the virtual pages. */
        for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
-               pages--, vaddr += vm->page_size) {
-               vm_paddr_t paddr;
-
-               paddr = vm_phy_page_alloc(vm,
-                               KVM_UTIL_MIN_PFN * vm->page_size, data_memslot);
+               pages--, vaddr += vm->page_size, paddr += vm->page_size) {
 
                virt_pg_map(vm, vaddr, paddr, pgd_memslot);
 
@@ -1177,16 +1334,14 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
 {
        struct userspace_mem_region *region;
 
-       list_for_each_entry(region, &vm->userspace_mem_regions, list) {
-               if ((gpa >= region->region.guest_phys_addr)
-                       && (gpa <= (region->region.guest_phys_addr
-                               + region->region.memory_size - 1)))
-                       return (void *) ((uintptr_t) region->host_mem
-                               + (gpa - region->region.guest_phys_addr));
+       region = userspace_mem_region_find(vm, gpa, gpa);
+       if (!region) {
+               TEST_FAIL("No vm physical memory at 0x%lx", gpa);
+               return NULL;
        }
 
-       TEST_FAIL("No vm physical memory at 0x%lx", gpa);
-       return NULL;
+       return (void *)((uintptr_t)region->host_mem
+               + (gpa - region->region.guest_phys_addr));
 }
 
 /*
@@ -1208,15 +1363,22 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
  */
 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
 {
-       struct userspace_mem_region *region;
+       struct rb_node *node;
 
-       list_for_each_entry(region, &vm->userspace_mem_regions, list) {
-               if ((hva >= region->host_mem)
-                       && (hva <= (region->host_mem
-                               + region->region.memory_size - 1)))
-                       return (vm_paddr_t) ((uintptr_t)
-                               region->region.guest_phys_addr
-                               + (hva - (uintptr_t) region->host_mem));
+       for (node = vm->regions.hva_tree.rb_node; node; ) {
+               struct userspace_mem_region *region =
+                       container_of(node, struct userspace_mem_region, hva_node);
+
+               if (hva >= region->host_mem) {
+                       if (hva <= (region->host_mem
+                               + region->region.memory_size - 1))
+                               return (vm_paddr_t)((uintptr_t)
+                                       region->region.guest_phys_addr
+                                       + (hva - (uintptr_t)region->host_mem));
+
+                       node = node->rb_right;
+               } else
+                       node = node->rb_left;
        }
 
        TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
@@ -1224,6 +1386,42 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
 }
 
 /*
+ * Address VM physical to Host Virtual *alias*.
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   gpa - VM physical address
+ *
+ * Output Args: None
+ *
+ * Return:
+ *   Equivalent address within the host virtual *alias* area, or NULL
+ *   (without failing the test) if the guest memory is not shared (so
+ *   no alias exists).
+ *
+ * When vm_create() and related functions are called with a shared memory
+ * src_type, we also create a writable, shared alias mapping of the
+ * underlying guest memory. This allows the host to manipulate guest memory
+ * without mapping that memory in the guest's address space. And, for
+ * userfaultfd-based demand paging, we can do so without triggering userfaults.
+ */
+void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
+{
+       struct userspace_mem_region *region;
+       uintptr_t offset;
+
+       region = userspace_mem_region_find(vm, gpa, gpa);
+       if (!region)
+               return NULL;
+
+       if (!region->host_alias)
+               return NULL;
+
+       offset = gpa - region->region.guest_phys_addr;
+       return (void *) ((uintptr_t) region->host_alias + offset);
+}
+
+/*
  * VM Create IRQ Chip
  *
  * Input Args:
@@ -1822,6 +2020,7 @@ int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
  */
 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 {
+       int ctr;
        struct userspace_mem_region *region;
        struct vcpu *vcpu;
 
@@ -1829,7 +2028,7 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
        fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
        fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
        fprintf(stream, "%*sMem Regions:\n", indent, "");
-       list_for_each_entry(region, &vm->userspace_mem_regions, list) {
+       hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
                fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
                        "host_virt: %p\n", indent + 2, "",
                        (uint64_t) region->region.guest_phys_addr,
@@ -2015,10 +2214,7 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm)
 
        if (vm == NULL) {
                /* Ensure that the KVM vendor-specific module is loaded. */
-               f = fopen(KVM_DEV_PATH, "r");
-               TEST_ASSERT(f != NULL, "Error in opening KVM dev file: %d",
-                           errno);
-               fclose(f);
+               close(open_kvm_dev_path_or_exit());
        }
 
        f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r");
@@ -2041,7 +2237,7 @@ unsigned int vm_get_page_shift(struct kvm_vm *vm)
        return vm->page_shift;
 }
 
-unsigned int vm_get_max_gfn(struct kvm_vm *vm)
+uint64_t vm_get_max_gfn(struct kvm_vm *vm)
 {
        return vm->max_gfn;
 }
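
A hedged usage sketch of the new addr_gpa2alias() added above (the wrapper function, pattern buffer, and page size are illustrative, not from the patch): write guest memory through the alias when the backing is shared, so the write does not fault through the guest-visible mapping, and fall back to the plain HVA for anonymous backing.

#include <string.h>
#include "kvm_util.h"

/* Illustrative helper: populate one guest page via the host alias if
 * one exists (no userfault triggered), else via the ordinary HVA. */
static void write_guest_page(struct kvm_vm *vm, vm_paddr_t gpa,
			     const void *pattern, size_t page_size)
{
	void *dst = addr_gpa2alias(vm, gpa);

	if (!dst)               /* anonymous backing: no alias mapping */
		dst = addr_gpa2hva(vm, gpa);

	memcpy(dst, pattern, page_size);
}
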
index 91ce1b5..a03febc 100644 (file)
@@ -8,6 +8,9 @@
 #ifndef SELFTEST_KVM_UTIL_INTERNAL_H
 #define SELFTEST_KVM_UTIL_INTERNAL_H
 
+#include "linux/hashtable.h"
+#include "linux/rbtree.h"
+
 #include "sparsebit.h"
 
 struct userspace_mem_region {
@@ -16,9 +19,13 @@ struct userspace_mem_region {
        int fd;
        off_t offset;
        void *host_mem;
+       void *host_alias;
        void *mmap_start;
+       void *mmap_alias;
        size_t mmap_size;
-       struct list_head list;
+       struct rb_node gpa_node;
+       struct rb_node hva_node;
+       struct hlist_node slot_node;
 };
 
 struct vcpu {
@@ -31,6 +38,12 @@ struct vcpu {
        uint32_t dirty_gfns_count;
 };
 
+struct userspace_mem_regions {
+       struct rb_root gpa_tree;
+       struct rb_root hva_tree;
+       DECLARE_HASHTABLE(slot_hash, 9);
+};
+
 struct kvm_vm {
        int mode;
        unsigned long type;
@@ -43,7 +56,7 @@ struct kvm_vm {
        unsigned int va_bits;
        uint64_t max_gfn;
        struct list_head vcpus;
-       struct list_head userspace_mem_regions;
+       struct userspace_mem_regions regions;
        struct sparsebit *vpages_valid;
        struct sparsebit *vpages_mapped;
        bool has_irqchip;
index 81490b9..7397ca2 100644 (file)
@@ -2,6 +2,7 @@
 /*
  * Copyright (C) 2020, Google LLC.
  */
+#include <inttypes.h>
 
 #include "kvm_util.h"
 #include "perf_test_util.h"
@@ -68,7 +69,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
        TEST_ASSERT(vcpu_memory_bytes % perf_test_args.guest_page_size == 0,
                    "Guest memory size is not guest page size aligned.");
 
-       vm = vm_create_with_vcpus(mode, vcpus,
+       vm = vm_create_with_vcpus(mode, vcpus, DEFAULT_GUEST_PHY_PAGES,
                                  (vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size,
                                  0, guest_code, NULL);
 
@@ -80,7 +81,8 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
         */
        TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
                    "Requested more guest memory than address space allows.\n"
-                   "    guest pages: %lx max gfn: %x vcpus: %d wss: %lx]\n",
+                   "    guest pages: %" PRIx64 " max gfn: %" PRIx64
+                   " vcpus: %d wss: %" PRIx64 "]\n",
                    guest_num_pages, vm_get_max_gfn(vm), vcpus,
                    vcpu_memory_bytes);
 
diff --git a/tools/testing/selftests/kvm/lib/rbtree.c b/tools/testing/selftests/kvm/lib/rbtree.c
new file mode 100644 (file)
index 0000000..a703f01
--- /dev/null
@@ -0,0 +1 @@
+#include "../../../../lib/rbtree.c"
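[Editor's note: this one-line wrapper simply compiles the kernel's own lib/rbtree.c into the selftest library, so the new gpa/hva trees use the in-kernel rb-tree implementation rather than a duplicated copy.]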
index 63d2bc7..af1031f 100644 (file)
@@ -166,72 +166,89 @@ size_t get_def_hugetlb_pagesz(void)
        return 0;
 }
 
+#define ANON_FLAGS     (MAP_PRIVATE | MAP_ANONYMOUS)
+#define ANON_HUGE_FLAGS        (ANON_FLAGS | MAP_HUGETLB)
+
 const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i)
 {
        static const struct vm_mem_backing_src_alias aliases[] = {
                [VM_MEM_SRC_ANONYMOUS] = {
                        .name = "anonymous",
-                       .flag = 0,
+                       .flag = ANON_FLAGS,
                },
                [VM_MEM_SRC_ANONYMOUS_THP] = {
                        .name = "anonymous_thp",
-                       .flag = 0,
+                       .flag = ANON_FLAGS,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB] = {
                        .name = "anonymous_hugetlb",
-                       .flag = MAP_HUGETLB,
+                       .flag = ANON_HUGE_FLAGS,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_16KB] = {
                        .name = "anonymous_hugetlb_16kb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_16KB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_16KB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_64KB] = {
                        .name = "anonymous_hugetlb_64kb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_64KB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_64KB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_512KB] = {
                        .name = "anonymous_hugetlb_512kb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_512KB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_512KB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_1MB] = {
                        .name = "anonymous_hugetlb_1mb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_1MB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_1MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_2MB] = {
                        .name = "anonymous_hugetlb_2mb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_2MB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_2MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_8MB] = {
                        .name = "anonymous_hugetlb_8mb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_8MB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_8MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_16MB] = {
                        .name = "anonymous_hugetlb_16mb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_16MB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_16MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_32MB] = {
                        .name = "anonymous_hugetlb_32mb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_32MB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_32MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_256MB] = {
                        .name = "anonymous_hugetlb_256mb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_256MB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_256MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_512MB] = {
                        .name = "anonymous_hugetlb_512mb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_512MB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_512MB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB] = {
                        .name = "anonymous_hugetlb_1gb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_1GB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_1GB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_2GB] = {
                        .name = "anonymous_hugetlb_2gb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_2GB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_2GB,
                },
                [VM_MEM_SRC_ANONYMOUS_HUGETLB_16GB] = {
                        .name = "anonymous_hugetlb_16gb",
-                       .flag = MAP_HUGETLB | MAP_HUGE_16GB,
+                       .flag = ANON_HUGE_FLAGS | MAP_HUGE_16GB,
+               },
+               [VM_MEM_SRC_SHMEM] = {
+                       .name = "shmem",
+                       .flag = MAP_SHARED,
+               },
+               [VM_MEM_SRC_SHARED_HUGETLB] = {
+                       .name = "shared_hugetlb",
+                       /*
+                        * No MAP_HUGETLB, we use MFD_HUGETLB instead. Since
+                        * we're using "file backed" memory, we need to specify
+                        * this when the FD is created, not when the area is
+                        * mapped.
+                        */
+                       .flag = MAP_SHARED,
                },
        };
        _Static_assert(ARRAY_SIZE(aliases) == NUM_SRC_TYPES,
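[Editor's note: with each alias now carrying complete mmap() flags (MAP_PRIVATE or MAP_SHARED included), callers can pass .flag to mmap() unchanged. A minimal illustrative sketch, assuming the usual selftest headers:

const struct vm_mem_backing_src_alias *src =
        vm_mem_backing_src_alias(VM_MEM_SRC_ANONYMOUS);
void *mem = mmap(NULL, get_backing_src_pagesz(VM_MEM_SRC_ANONYMOUS),
                 PROT_READ | PROT_WRITE, src->flag, -1, 0);

TEST_ASSERT(mem != MAP_FAILED, "mmap() failed, errno: %d", errno);
]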
@@ -250,10 +267,12 @@ size_t get_backing_src_pagesz(uint32_t i)
 
        switch (i) {
        case VM_MEM_SRC_ANONYMOUS:
+       case VM_MEM_SRC_SHMEM:
                return getpagesize();
        case VM_MEM_SRC_ANONYMOUS_THP:
                return get_trans_hugepagesz();
        case VM_MEM_SRC_ANONYMOUS_HUGETLB:
+       case VM_MEM_SRC_SHARED_HUGETLB:
                return get_def_hugetlb_pagesz();
        default:
                return MAP_HUGE_PAGE_SIZE(flag);
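[Editor's note: the VM_MEM_SRC_SHARED_HUGETLB comment above says hugepages are requested when the fd is created rather than at map time. A standalone sketch of that pattern (function name hypothetical, not the selftest's actual setup code):

#define _GNU_SOURCE
#include <sys/mman.h>
#include <unistd.h>

static void *map_shared_hugetlb(size_t size)
{
        /* MFD_HUGETLB replaces MAP_HUGETLB for file-backed memory. */
        int fd = memfd_create("guest_mem", MFD_CLOEXEC | MFD_HUGETLB);
        void *mem;

        if (fd < 0)
                return NULL;
        if (ftruncate(fd, size) < 0) {
                close(fd);
                return NULL;
        }
        mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        close(fd);      /* the mapping keeps its own reference */
        return mem == MAP_FAILED ? NULL : mem;
}
]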
index a8906e6..efe2350 100644 (file)
@@ -657,9 +657,7 @@ struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
                return cpuid;
 
        cpuid = allocate_kvm_cpuid2();
-       kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
-       if (kvm_fd < 0)
-               exit(KSFT_SKIP);
+       kvm_fd = open_kvm_dev_path_or_exit();
 
        ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
        TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n",
@@ -691,9 +689,7 @@ uint64_t kvm_get_feature_msr(uint64_t msr_index)
 
        buffer.header.nmsrs = 1;
        buffer.entry.index = msr_index;
-       kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
-       if (kvm_fd < 0)
-               exit(KSFT_SKIP);
+       kvm_fd = open_kvm_dev_path_or_exit();
 
        r = ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header);
        TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
@@ -986,9 +982,7 @@ struct kvm_msr_list *kvm_get_msr_index_list(void)
        struct kvm_msr_list *list;
        int nmsrs, r, kvm_fd;
 
-       kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
-       if (kvm_fd < 0)
-               exit(KSFT_SKIP);
+       kvm_fd = open_kvm_dev_path_or_exit();
 
        nmsrs = kvm_get_num_msrs_fd(kvm_fd);
        list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
@@ -1312,9 +1306,7 @@ struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
                return cpuid;
 
        cpuid = allocate_kvm_cpuid2();
-       kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
-       if (kvm_fd < 0)
-               exit(KSFT_SKIP);
+       kvm_fd = open_kvm_dev_path_or_exit();
 
        ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
        TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_HV_CPUID failed %d %d\n",
index 6096bf0..98351ba 100644 (file)
@@ -71,14 +71,22 @@ struct memslot_antagonist_args {
 };
 
 static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
-                             uint64_t nr_modifications, uint64_t gpa)
+                              uint64_t nr_modifications)
 {
+       const uint64_t pages = 1;
+       uint64_t gpa;
        int i;
 
+       /*
+        * Add the dummy memslot just below the perf_test_util memslot, which is
+        * at the top of the guest physical address space.
+        */
+       gpa = guest_test_phys_mem - pages * vm_get_page_size(vm);
+
        for (i = 0; i < nr_modifications; i++) {
                usleep(delay);
                vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa,
-                                           DUMMY_MEMSLOT_INDEX, 1, 0);
+                                           DUMMY_MEMSLOT_INDEX, pages, 0);
 
                vm_mem_region_delete(vm, DUMMY_MEMSLOT_INDEX);
        }
@@ -120,11 +128,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        pr_info("Started all vCPUs\n");
 
        add_remove_memslot(vm, p->memslot_modification_delay,
-                          p->nr_memslot_modifications,
-                          guest_test_phys_mem +
-                          (guest_percpu_mem_size * nr_vcpus) +
-                          perf_test_args.host_page_size +
-                          perf_test_args.guest_page_size);
+                          p->nr_memslot_modifications);
 
        run_vcpus = false;
 
diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
new file mode 100644 (file)
index 0000000..1123965
--- /dev/null
@@ -0,0 +1,1037 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A memslot-related performance benchmark.
+ *
+ * Copyright (C) 2021 Oracle and/or its affiliates.
+ *
+ * Basic guest setup / host vCPU thread code lifted from set_memory_region_test.
+ */
+#include <pthread.h>
+#include <sched.h>
+#include <semaphore.h>
+#include <stdatomic.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <time.h>
+#include <unistd.h>
+
+#include <linux/compiler.h>
+
+#include <test_util.h>
+#include <kvm_util.h>
+#include <processor.h>
+
+#define VCPU_ID 0
+
+#define MEM_SIZE               ((512U << 20) + 4096)
+#define MEM_SIZE_PAGES         (MEM_SIZE / 4096)
+#define MEM_GPA                0x10000000UL
+#define MEM_AUX_GPA            MEM_GPA
+#define MEM_SYNC_GPA           MEM_AUX_GPA
+#define MEM_TEST_GPA           (MEM_AUX_GPA + 4096)
+#define MEM_TEST_SIZE          (MEM_SIZE - 4096)
+static_assert(MEM_SIZE % 4096 == 0, "invalid mem size");
+static_assert(MEM_TEST_SIZE % 4096 == 0, "invalid mem test size");
+
+/*
+ * 32 MiB is the maximum size that gets well over 100 iterations on 509
+ * slots. Considering that each slot needs to have at least one page,
+ * up to 8194 slots in use can then be tested (although with slightly
+ * limited resolution).
+ */
+#define MEM_SIZE_MAP           ((32U << 20) + 4096)
+#define MEM_SIZE_MAP_PAGES     (MEM_SIZE_MAP / 4096)
+#define MEM_TEST_MAP_SIZE      (MEM_SIZE_MAP - 4096)
+#define MEM_TEST_MAP_SIZE_PAGES (MEM_TEST_MAP_SIZE / 4096)
+static_assert(MEM_SIZE_MAP % 4096 == 0, "invalid map test region size");
+static_assert(MEM_TEST_MAP_SIZE % 4096 == 0, "invalid map test region size");
+static_assert(MEM_TEST_MAP_SIZE_PAGES % 2 == 0, "invalid map test region size");
+static_assert(MEM_TEST_MAP_SIZE_PAGES > 2, "invalid map test region size");
+
+/*
+ * 128 MiB is the minimum size that fills 32k slots with at least one page
+ * each while still getting 100+ iterations in such a test.
+ */
+#define MEM_TEST_UNMAP_SIZE            (128U << 20)
+#define MEM_TEST_UNMAP_SIZE_PAGES      (MEM_TEST_UNMAP_SIZE / 4096)
+/* 2 MiB chunk size like a typical huge page */
+#define MEM_TEST_UNMAP_CHUNK_PAGES     (2U << (20 - 12))
+static_assert(MEM_TEST_UNMAP_SIZE <= MEM_TEST_SIZE,
+             "invalid unmap test region size");
+static_assert(MEM_TEST_UNMAP_SIZE % 4096 == 0,
+             "invalid unmap test region size");
+static_assert(MEM_TEST_UNMAP_SIZE_PAGES %
+             (2 * MEM_TEST_UNMAP_CHUNK_PAGES) == 0,
+             "invalid unmap test region size");
+
+/*
+ * For the move active test, the middle of the test area is placed on
+ * a memslot boundary: half lies in the memslot being moved, half in
+ * other memslot(s).
+ *
+ * When running this test with 32k memslots (32764, really), each memslot
+ * contains 4 pages.
+ * The last one additionally contains the remaining 21 pages of memory,
+ * for a total size of 25 pages.
+ * Hence, the maximum size here is 50 pages.
+ */
+#define MEM_TEST_MOVE_SIZE_PAGES       (50)
+#define MEM_TEST_MOVE_SIZE             (MEM_TEST_MOVE_SIZE_PAGES * 4096)
+#define MEM_TEST_MOVE_GPA_DEST         (MEM_GPA + MEM_SIZE)
+static_assert(MEM_TEST_MOVE_SIZE <= MEM_TEST_SIZE,
+             "invalid move test region size");
+
+#define MEM_TEST_VAL_1 0x1122334455667788
+#define MEM_TEST_VAL_2 0x99AABBCCDDEEFF00
+
+struct vm_data {
+       struct kvm_vm *vm;
+       pthread_t vcpu_thread;
+       uint32_t nslots;
+       uint64_t npages;
+       uint64_t pages_per_slot;
+       void **hva_slots;
+       bool mmio_ok;
+       uint64_t mmio_gpa_min;
+       uint64_t mmio_gpa_max;
+};
+
+struct sync_area {
+       atomic_bool start_flag;
+       atomic_bool exit_flag;
+       atomic_bool sync_flag;
+       void *move_area_ptr;
+};
+
+/*
+ * Technically, we also need the atomic bool to be address-free, which
+ * is recommended, but not strictly required, by C11 for lockless
+ * implementations.
+ * However, in practice both GCC and Clang fulfill this requirement on
+ * all KVM-supported platforms.
+ */
+static_assert(ATOMIC_BOOL_LOCK_FREE == 2, "atomic bool is not lockless");
+
+static sem_t vcpu_ready;
+
+static bool map_unmap_verify;
+
+static bool verbose;
+#define pr_info_v(...)                         \
+       do {                                    \
+               if (verbose)                    \
+                       pr_info(__VA_ARGS__);   \
+       } while (0)
+
+static void *vcpu_worker(void *data)
+{
+       struct vm_data *vm = data;
+       struct kvm_run *run;
+       struct ucall uc;
+       uint64_t cmd;
+
+       run = vcpu_state(vm->vm, VCPU_ID);
+       while (1) {
+               vcpu_run(vm->vm, VCPU_ID);
+
+               if (run->exit_reason == KVM_EXIT_IO) {
+                       cmd = get_ucall(vm->vm, VCPU_ID, &uc);
+                       if (cmd != UCALL_SYNC)
+                               break;
+
+                       sem_post(&vcpu_ready);
+                       continue;
+               }
+
+               if (run->exit_reason != KVM_EXIT_MMIO)
+                       break;
+
+               TEST_ASSERT(vm->mmio_ok, "Unexpected mmio exit");
+               TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read");
+               TEST_ASSERT(run->mmio.len == 8,
+                           "Unexpected exit mmio size = %u", run->mmio.len);
+               TEST_ASSERT(run->mmio.phys_addr >= vm->mmio_gpa_min &&
+                           run->mmio.phys_addr <= vm->mmio_gpa_max,
+                           "Unexpected exit mmio address = 0x%llx",
+                           run->mmio.phys_addr);
+       }
+
+       if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
+               TEST_FAIL("%s at %s:%ld, val = %lu", (const char *)uc.args[0],
+                         __FILE__, uc.args[1], uc.args[2]);
+
+       return NULL;
+}
+
+static void wait_for_vcpu(void)
+{
+       struct timespec ts;
+
+       TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts),
+                   "clock_gettime() failed: %d\n", errno);
+
+       ts.tv_sec += 2;
+       TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts),
+                   "sem_timedwait() failed: %d\n", errno);
+}
+
+static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages)
+{
+       uint64_t gpage, pgoffs;
+       uint32_t slot, slotoffs;
+       void *base;
+
+       TEST_ASSERT(gpa >= MEM_GPA, "Too low gpa to translate");
+       TEST_ASSERT(gpa < MEM_GPA + data->npages * 4096,
+                   "Too high gpa to translate");
+       gpa -= MEM_GPA;
+
+       gpage = gpa / 4096;
+       pgoffs = gpa % 4096;
+       slot = min(gpage / data->pages_per_slot, (uint64_t)data->nslots - 1);
+       slotoffs = gpage - (slot * data->pages_per_slot);
+
+       if (rempages) {
+               uint64_t slotpages;
+
+               if (slot == data->nslots - 1)
+                       slotpages = data->npages - slot * data->pages_per_slot;
+               else
+                       slotpages = data->pages_per_slot;
+
+               TEST_ASSERT(!pgoffs,
+                           "Asking for remaining pages in slot but gpa not page aligned");
+               *rempages = slotpages - slotoffs;
+       }
+
+       base = data->hva_slots[slot];
+       return (uint8_t *)base + slotoffs * 4096 + pgoffs;
+}
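[Editor's note: a quick worked example of the translation above, with illustrative numbers: for pages_per_slot = 4, nslots = 8 and gpa = MEM_GPA + 6 * 4096, we get gpage = 6, slot = min(6 / 4, 8 - 1) = 1 and slotoffs = 6 - 1 * 4 = 2, so the returned address is hva_slots[1] + 2 * 4096.]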
+
+static uint64_t vm_slot2gpa(struct vm_data *data, uint32_t slot)
+{
+       TEST_ASSERT(slot < data->nslots, "Too high slot number");
+
+       return MEM_GPA + slot * data->pages_per_slot * 4096;
+}
+
+static struct vm_data *alloc_vm(void)
+{
+       struct vm_data *data;
+
+       data = malloc(sizeof(*data));
+       TEST_ASSERT(data, "malloc(vmdata) failed");
+
+       data->vm = NULL;
+       data->hva_slots = NULL;
+
+       return data;
+}
+
+static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
+                      void *guest_code, uint64_t mempages,
+                      struct timespec *slot_runtime)
+{
+       uint32_t max_mem_slots;
+       uint64_t rempages;
+       uint64_t guest_addr;
+       uint32_t slot;
+       struct timespec tstart;
+       struct sync_area *sync;
+
+       max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
+       TEST_ASSERT(max_mem_slots > 1,
+                   "KVM_CAP_NR_MEMSLOTS should be greater than 1");
+       TEST_ASSERT(nslots > 1 || nslots == -1,
+                   "Slot count cap should be greater than 1");
+       if (nslots != -1)
+               max_mem_slots = min(max_mem_slots, (uint32_t)nslots);
+       pr_info_v("Allowed number of memory slots: %"PRIu32"\n", max_mem_slots);
+
+       TEST_ASSERT(mempages > 1,
+                   "Can't test without any memory");
+
+       data->npages = mempages;
+       data->nslots = max_mem_slots - 1;
+       data->pages_per_slot = mempages / data->nslots;
+       if (!data->pages_per_slot) {
+               *maxslots = mempages + 1;
+               return false;
+       }
+
+       rempages = mempages % data->nslots;
+       data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots);
+       TEST_ASSERT(data->hva_slots, "malloc() fail");
+
+       data->vm = vm_create_default(VCPU_ID, mempages, guest_code);
+
+       pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n",
+               max_mem_slots - 1, data->pages_per_slot, rempages);
+
+       clock_gettime(CLOCK_MONOTONIC, &tstart);
+       for (slot = 1, guest_addr = MEM_GPA; slot < max_mem_slots; slot++) {
+               uint64_t npages;
+
+               npages = data->pages_per_slot;
+               if (slot == max_mem_slots - 1)
+                       npages += rempages;
+
+               vm_userspace_mem_region_add(data->vm, VM_MEM_SRC_ANONYMOUS,
+                                           guest_addr, slot, npages,
+                                           0);
+               guest_addr += npages * 4096;
+       }
+       *slot_runtime = timespec_elapsed(tstart);
+
+       for (slot = 0, guest_addr = MEM_GPA; slot < max_mem_slots - 1; slot++) {
+               uint64_t npages;
+               uint64_t gpa;
+
+               npages = data->pages_per_slot;
+               if (slot == max_mem_slots - 2)
+                       npages += rempages;
+
+               gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr,
+                                        slot + 1);
+               TEST_ASSERT(gpa == guest_addr,
+                           "vm_phy_pages_alloc() failed\n");
+
+               data->hva_slots[slot] = addr_gpa2hva(data->vm, guest_addr);
+               memset(data->hva_slots[slot], 0, npages * 4096);
+
+               guest_addr += npages * 4096;
+       }
+
+       virt_map(data->vm, MEM_GPA, MEM_GPA, mempages, 0);
+
+       sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
+       atomic_init(&sync->start_flag, false);
+       atomic_init(&sync->exit_flag, false);
+       atomic_init(&sync->sync_flag, false);
+
+       data->mmio_ok = false;
+
+       return true;
+}
+
+static void launch_vm(struct vm_data *data)
+{
+       pr_info_v("Launching the test VM\n");
+
+       pthread_create(&data->vcpu_thread, NULL, vcpu_worker, data);
+
+       /* Ensure the guest thread is spun up. */
+       wait_for_vcpu();
+}
+
+static void free_vm(struct vm_data *data)
+{
+       kvm_vm_free(data->vm);
+       free(data->hva_slots);
+       free(data);
+}
+
+static void wait_guest_exit(struct vm_data *data)
+{
+       pthread_join(data->vcpu_thread, NULL);
+}
+
+static void let_guest_run(struct sync_area *sync)
+{
+       atomic_store_explicit(&sync->start_flag, true, memory_order_release);
+}
+
+static void guest_spin_until_start(void)
+{
+       struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+
+       while (!atomic_load_explicit(&sync->start_flag, memory_order_acquire))
+               ;
+}
+
+static void make_guest_exit(struct sync_area *sync)
+{
+       atomic_store_explicit(&sync->exit_flag, true, memory_order_release);
+}
+
+static bool _guest_should_exit(void)
+{
+       struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+
+       return atomic_load_explicit(&sync->exit_flag, memory_order_acquire);
+}
+
+#define guest_should_exit() unlikely(_guest_should_exit())
+
+/*
+ * noinline so we can easily see how much time the host spends waiting
+ * for the guest.
+ * For the same reason, use alarm() instead of polling clock_gettime()
+ * to implement a wait timeout.
+ */
+static noinline void host_perform_sync(struct sync_area *sync)
+{
+       alarm(2);
+
+       atomic_store_explicit(&sync->sync_flag, true, memory_order_release);
+       while (atomic_load_explicit(&sync->sync_flag, memory_order_acquire))
+               ;
+
+       alarm(0);
+}
+
+static bool guest_perform_sync(void)
+{
+       struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+       bool expected;
+
+       do {
+               if (guest_should_exit())
+                       return false;
+
+               expected = true;
+       } while (!atomic_compare_exchange_weak_explicit(&sync->sync_flag,
+                                                       &expected, false,
+                                                       memory_order_acq_rel,
+                                                       memory_order_relaxed));
+
+       return true;
+}
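[Editor's note: host_perform_sync() and guest_perform_sync() form a simple flag rendezvous: the host raises sync_flag and spins until the guest compare-and-swaps it back to false. A standalone C11 illustration of the same pattern between two host threads (purely illustrative, not test code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool sync_flag;

/* Mirrors guest_perform_sync(): consume the raised flag exactly once. */
static void *consumer(void *arg)
{
        bool expected;

        (void)arg;
        do {
                expected = true;
        } while (!atomic_compare_exchange_weak_explicit(&sync_flag, &expected,
                                                        false,
                                                        memory_order_acq_rel,
                                                        memory_order_relaxed));
        return NULL;
}

int main(void)
{
        pthread_t thread;

        atomic_init(&sync_flag, false);
        pthread_create(&thread, NULL, consumer, NULL);

        /* Mirrors host_perform_sync(): raise the flag, spin until consumed. */
        atomic_store_explicit(&sync_flag, true, memory_order_release);
        while (atomic_load_explicit(&sync_flag, memory_order_acquire))
                ;

        pthread_join(thread, NULL);
        printf("handshake complete\n");
        return 0;
}
]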
+
+static void guest_code_test_memslot_move(void)
+{
+       struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+       uintptr_t base = (typeof(base))READ_ONCE(sync->move_area_ptr);
+
+       GUEST_SYNC(0);
+
+       guest_spin_until_start();
+
+       while (!guest_should_exit()) {
+               uintptr_t ptr;
+
+               for (ptr = base; ptr < base + MEM_TEST_MOVE_SIZE;
+                    ptr += 4096)
+                       *(uint64_t *)ptr = MEM_TEST_VAL_1;
+
+               /*
+                * No host sync here since the MMIO exits are so expensive
+                * that the host would spend most of its time waiting for
+                * the guest; instead of measuring memslot move performance,
+                * we would end up measuring the performance and likelihood
+                * of MMIO exits.
+                */
+       }
+
+       GUEST_DONE();
+}
+
+static void guest_code_test_memslot_map(void)
+{
+       struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+
+       GUEST_SYNC(0);
+
+       guest_spin_until_start();
+
+       while (1) {
+               uintptr_t ptr;
+
+               for (ptr = MEM_TEST_GPA;
+                    ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2; ptr += 4096)
+                       *(uint64_t *)ptr = MEM_TEST_VAL_1;
+
+               if (!guest_perform_sync())
+                       break;
+
+               for (ptr = MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2;
+                    ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE; ptr += 4096)
+                       *(uint64_t *)ptr = MEM_TEST_VAL_2;
+
+               if (!guest_perform_sync())
+                       break;
+       }
+
+       GUEST_DONE();
+}
+
+static void guest_code_test_memslot_unmap(void)
+{
+       struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+
+       GUEST_SYNC(0);
+
+       guest_spin_until_start();
+
+       while (1) {
+               uintptr_t ptr = MEM_TEST_GPA;
+
+               /*
+                * We can afford to access (map) just a small number of pages
+                * per host sync; otherwise the host will spend a significant
+                * amount of its time waiting for the guest (instead of doing
+                * unmap operations), effectively turning this test into a map
+                * performance test.
+                *
+                * Just access a single page to be on the safe side.
+                */
+               *(uint64_t *)ptr = MEM_TEST_VAL_1;
+
+               if (!guest_perform_sync())
+                       break;
+
+               ptr += MEM_TEST_UNMAP_SIZE / 2;
+               *(uint64_t *)ptr = MEM_TEST_VAL_2;
+
+               if (!guest_perform_sync())
+                       break;
+       }
+
+       GUEST_DONE();
+}
+
+static void guest_code_test_memslot_rw(void)
+{
+       GUEST_SYNC(0);
+
+       guest_spin_until_start();
+
+       while (1) {
+               uintptr_t ptr;
+
+               for (ptr = MEM_TEST_GPA;
+                    ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += 4096)
+                       *(uint64_t *)ptr = MEM_TEST_VAL_1;
+
+               if (!guest_perform_sync())
+                       break;
+
+               for (ptr = MEM_TEST_GPA + 4096 / 2;
+                    ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += 4096) {
+                       uint64_t val = *(uint64_t *)ptr;
+
+                       GUEST_ASSERT_1(val == MEM_TEST_VAL_2, val);
+                       *(uint64_t *)ptr = 0;
+               }
+
+               if (!guest_perform_sync())
+                       break;
+       }
+
+       GUEST_DONE();
+}
+
+static bool test_memslot_move_prepare(struct vm_data *data,
+                                     struct sync_area *sync,
+                                     uint64_t *maxslots, bool isactive)
+{
+       uint64_t movesrcgpa, movetestgpa;
+
+       movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
+
+       if (isactive) {
+               uint64_t lastpages;
+
+               vm_gpa2hva(data, movesrcgpa, &lastpages);
+               if (lastpages < MEM_TEST_MOVE_SIZE_PAGES / 2) {
+                       *maxslots = 0;
+                       return false;
+               }
+       }
+
+       movetestgpa = movesrcgpa - (MEM_TEST_MOVE_SIZE / (isactive ? 2 : 1));
+       sync->move_area_ptr = (void *)movetestgpa;
+
+       if (isactive) {
+               data->mmio_ok = true;
+               data->mmio_gpa_min = movesrcgpa;
+               data->mmio_gpa_max = movesrcgpa + MEM_TEST_MOVE_SIZE / 2 - 1;
+       }
+
+       return true;
+}
+
+static bool test_memslot_move_prepare_active(struct vm_data *data,
+                                            struct sync_area *sync,
+                                            uint64_t *maxslots)
+{
+       return test_memslot_move_prepare(data, sync, maxslots, true);
+}
+
+static bool test_memslot_move_prepare_inactive(struct vm_data *data,
+                                              struct sync_area *sync,
+                                              uint64_t *maxslots)
+{
+       return test_memslot_move_prepare(data, sync, maxslots, false);
+}
+
+static void test_memslot_move_loop(struct vm_data *data, struct sync_area *sync)
+{
+       uint64_t movesrcgpa;
+
+       movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
+       vm_mem_region_move(data->vm, data->nslots - 1 + 1,
+                          MEM_TEST_MOVE_GPA_DEST);
+       vm_mem_region_move(data->vm, data->nslots - 1 + 1, movesrcgpa);
+}
+
+static void test_memslot_do_unmap(struct vm_data *data,
+                                 uint64_t offsp, uint64_t count)
+{
+       uint64_t gpa, ctr;
+
+       for (gpa = MEM_TEST_GPA + offsp * 4096, ctr = 0; ctr < count; ) {
+               uint64_t npages;
+               void *hva;
+               int ret;
+
+               hva = vm_gpa2hva(data, gpa, &npages);
+               TEST_ASSERT(npages, "Empty memory slot at gptr 0x%"PRIx64, gpa);
+               npages = min(npages, count - ctr);
+               ret = madvise(hva, npages * 4096, MADV_DONTNEED);
+               TEST_ASSERT(!ret,
+                           "madvise(%p, MADV_DONTNEED) on VM memory should not fail for gptr 0x%"PRIx64,
+                           hva, gpa);
+               ctr += npages;
+               gpa += npages * 4096;
+       }
+       TEST_ASSERT(ctr == count,
+                   "madvise(MADV_DONTNEED) should exactly cover all of the requested area");
+}
+
+static void test_memslot_map_unmap_check(struct vm_data *data,
+                                        uint64_t offsp, uint64_t valexp)
+{
+       uint64_t gpa;
+       uint64_t *val;
+
+       if (!map_unmap_verify)
+               return;
+
+       gpa = MEM_TEST_GPA + offsp * 4096;
+       val = (typeof(val))vm_gpa2hva(data, gpa, NULL);
+       TEST_ASSERT(*val == valexp,
+                   "Guest written values should read back correctly before unmap (%"PRIu64" vs %"PRIu64" @ %"PRIx64")",
+                   *val, valexp, gpa);
+       *val = 0;
+}
+
+static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync)
+{
+       /*
+        * Unmap the second half of the test area while the guest writes to
+        * (maps) the first half.
+        */
+       test_memslot_do_unmap(data, MEM_TEST_MAP_SIZE_PAGES / 2,
+                             MEM_TEST_MAP_SIZE_PAGES / 2);
+
+       /*
+        * Wait for the guest to finish writing the first half of the test
+        * area, verify the written value on the first and the last page of
+        * this area, and then unmap it.
+        * Meanwhile, the guest is writing to (mapping) the second half of
+        * the test area.
+        */
+       host_perform_sync(sync);
+       test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1);
+       test_memslot_map_unmap_check(data,
+                                    MEM_TEST_MAP_SIZE_PAGES / 2 - 1,
+                                    MEM_TEST_VAL_1);
+       test_memslot_do_unmap(data, 0, MEM_TEST_MAP_SIZE_PAGES / 2);
+
+       /*
+        * Wait for the guest to finish writing the second half of the test
+        * area and verify the written value on the first and the last page
+        * of this area.
+        * The area will be unmapped at the beginning of the next loop
+        * iteration.
+        * Meanwhile, the guest is writing to (mapping) the first half of
+        * the test area.
+        */
+       host_perform_sync(sync);
+       test_memslot_map_unmap_check(data, MEM_TEST_MAP_SIZE_PAGES / 2,
+                                    MEM_TEST_VAL_2);
+       test_memslot_map_unmap_check(data, MEM_TEST_MAP_SIZE_PAGES - 1,
+                                    MEM_TEST_VAL_2);
+}
+
+static void test_memslot_unmap_loop_common(struct vm_data *data,
+                                          struct sync_area *sync,
+                                          uint64_t chunk)
+{
+       uint64_t ctr;
+
+       /*
+        * Wait for the guest to finish mapping page(s) in the first half
+        * of the test area, verify the written value, and then unmap
+        * this area.
+        * Meanwhile, the guest is writing to (mapping) page(s) in the second
+        * half of the test area.
+        */
+       host_perform_sync(sync);
+       test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1);
+       for (ctr = 0; ctr < MEM_TEST_UNMAP_SIZE_PAGES / 2; ctr += chunk)
+               test_memslot_do_unmap(data, ctr, chunk);
+
+       /* Likewise, but for the opposite host / guest areas */
+       host_perform_sync(sync);
+       test_memslot_map_unmap_check(data, MEM_TEST_UNMAP_SIZE_PAGES / 2,
+                                    MEM_TEST_VAL_2);
+       for (ctr = MEM_TEST_UNMAP_SIZE_PAGES / 2;
+            ctr < MEM_TEST_UNMAP_SIZE_PAGES; ctr += chunk)
+               test_memslot_do_unmap(data, ctr, chunk);
+}
+
+static void test_memslot_unmap_loop(struct vm_data *data,
+                                   struct sync_area *sync)
+{
+       test_memslot_unmap_loop_common(data, sync, 1);
+}
+
+static void test_memslot_unmap_loop_chunked(struct vm_data *data,
+                                           struct sync_area *sync)
+{
+       test_memslot_unmap_loop_common(data, sync, MEM_TEST_UNMAP_CHUNK_PAGES);
+}
+
+static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync)
+{
+       uint64_t gptr;
+
+       for (gptr = MEM_TEST_GPA + 4096 / 2;
+            gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += 4096)
+               *(uint64_t *)vm_gpa2hva(data, gptr, NULL) = MEM_TEST_VAL_2;
+
+       host_perform_sync(sync);
+
+       for (gptr = MEM_TEST_GPA;
+            gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += 4096) {
+               uint64_t *vptr = (typeof(vptr))vm_gpa2hva(data, gptr, NULL);
+               uint64_t val = *vptr;
+
+               TEST_ASSERT(val == MEM_TEST_VAL_1,
+                           "Guest written values should read back correctly (is %"PRIu64" @ %"PRIx64")",
+                           val, gptr);
+               *vptr = 0;
+       }
+
+       host_perform_sync(sync);
+}
+
+struct test_data {
+       const char *name;
+       uint64_t mem_size;
+       void (*guest_code)(void);
+       bool (*prepare)(struct vm_data *data, struct sync_area *sync,
+                       uint64_t *maxslots);
+       void (*loop)(struct vm_data *data, struct sync_area *sync);
+};
+
+static bool test_execute(int nslots, uint64_t *maxslots,
+                        unsigned int maxtime,
+                        const struct test_data *tdata,
+                        uint64_t *nloops,
+                        struct timespec *slot_runtime,
+                        struct timespec *guest_runtime)
+{
+       uint64_t mem_size = tdata->mem_size ? : MEM_SIZE_PAGES;
+       struct vm_data *data;
+       struct sync_area *sync;
+       struct timespec tstart;
+       bool ret = true;
+
+       data = alloc_vm();
+       if (!prepare_vm(data, nslots, maxslots, tdata->guest_code,
+                       mem_size, slot_runtime)) {
+               ret = false;
+               goto exit_free;
+       }
+
+       sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
+
+       if (tdata->prepare &&
+           !tdata->prepare(data, sync, maxslots)) {
+               ret = false;
+               goto exit_free;
+       }
+
+       launch_vm(data);
+
+       clock_gettime(CLOCK_MONOTONIC, &tstart);
+       let_guest_run(sync);
+
+       while (1) {
+               *guest_runtime = timespec_elapsed(tstart);
+               if (guest_runtime->tv_sec >= maxtime)
+                       break;
+
+               tdata->loop(data, sync);
+
+               (*nloops)++;
+       }
+
+       make_guest_exit(sync);
+       wait_guest_exit(data);
+
+exit_free:
+       free_vm(data);
+
+       return ret;
+}
+
+static const struct test_data tests[] = {
+       {
+               .name = "map",
+               .mem_size = MEM_SIZE_MAP_PAGES,
+               .guest_code = guest_code_test_memslot_map,
+               .loop = test_memslot_map_loop,
+       },
+       {
+               .name = "unmap",
+               .mem_size = MEM_TEST_UNMAP_SIZE_PAGES + 1,
+               .guest_code = guest_code_test_memslot_unmap,
+               .loop = test_memslot_unmap_loop,
+       },
+       {
+               .name = "unmap chunked",
+               .mem_size = MEM_TEST_UNMAP_SIZE_PAGES + 1,
+               .guest_code = guest_code_test_memslot_unmap,
+               .loop = test_memslot_unmap_loop_chunked,
+       },
+       {
+               .name = "move active area",
+               .guest_code = guest_code_test_memslot_move,
+               .prepare = test_memslot_move_prepare_active,
+               .loop = test_memslot_move_loop,
+       },
+       {
+               .name = "move inactive area",
+               .guest_code = guest_code_test_memslot_move,
+               .prepare = test_memslot_move_prepare_inactive,
+               .loop = test_memslot_move_loop,
+       },
+       {
+               .name = "RW",
+               .guest_code = guest_code_test_memslot_rw,
+               .loop = test_memslot_rw_loop
+       },
+};
+
+#define NTESTS ARRAY_SIZE(tests)
+
+struct test_args {
+       int tfirst;
+       int tlast;
+       int nslots;
+       int seconds;
+       int runs;
+};
+
+static void help(char *name, struct test_args *targs)
+{
+       int ctr;
+
+       pr_info("usage: %s [-h] [-v] [-d] [-s slots] [-f first_test] [-e last_test] [-l test_length] [-r run_count]\n",
+               name);
+       pr_info(" -h: print this help screen.\n");
+       pr_info(" -v: enable verbose mode (not for benchmarking).\n");
+       pr_info(" -d: enable extra debug checks.\n");
+       pr_info(" -s: specify memslot count cap (-1 means no cap; currently: %i)\n",
+               targs->nslots);
+       pr_info(" -f: specify the first test to run (currently: %i; max %zu)\n",
+               targs->tfirst, NTESTS - 1);
+       pr_info(" -e: specify the last test to run (currently: %i; max %zu)\n",
+               targs->tlast, NTESTS - 1);
+       pr_info(" -l: specify the test length in seconds (currently: %i)\n",
+               targs->seconds);
+       pr_info(" -r: specify the number of runs per test (currently: %i)\n",
+               targs->runs);
+
+       pr_info("\nAvailable tests:\n");
+       for (ctr = 0; ctr < NTESTS; ctr++)
+               pr_info("%d: %s\n", ctr, tests[ctr].name);
+}
+
+static bool parse_args(int argc, char *argv[],
+                      struct test_args *targs)
+{
+       int opt;
+
+       while ((opt = getopt(argc, argv, "hvds:f:e:l:r:")) != -1) {
+               switch (opt) {
+               case 'h':
+               default:
+                       help(argv[0], targs);
+                       return false;
+               case 'v':
+                       verbose = true;
+                       break;
+               case 'd':
+                       map_unmap_verify = true;
+                       break;
+               case 's':
+                       targs->nslots = atoi(optarg);
+                       if (targs->nslots <= 0 && targs->nslots != -1) {
+                               pr_info("Slot count cap has to be positive or -1 for no cap\n");
+                               return false;
+                       }
+                       break;
+               case 'f':
+                       targs->tfirst = atoi(optarg);
+                       if (targs->tfirst < 0) {
+                               pr_info("First test to run has to be non-negative\n");
+                               return false;
+                       }
+                       break;
+               case 'e':
+                       targs->tlast = atoi(optarg);
+                       if (targs->tlast < 0 || targs->tlast >= NTESTS) {
+                               pr_info("Last test to run has to be non-negative and less than %zu\n",
+                                       NTESTS);
+                               return false;
+                       }
+                       break;
+               case 'l':
+                       targs->seconds = atoi(optarg);
+                       if (targs->seconds < 0) {
+                               pr_info("Test length in seconds has to be non-negative\n");
+                               return false;
+                       }
+                       break;
+               case 'r':
+                       targs->runs = atoi(optarg);
+                       if (targs->runs <= 0) {
+                               pr_info("Runs per test has to be positive\n");
+                               return false;
+                       }
+                       break;
+               }
+       }
+
+       if (optind < argc) {
+               help(argv[0], targs);
+               return false;
+       }
+
+       if (targs->tfirst > targs->tlast) {
+               pr_info("First test to run cannot be greater than the last test to run\n");
+               return false;
+       }
+
+       return true;
+}
+
+struct test_result {
+       struct timespec slot_runtime, guest_runtime, iter_runtime;
+       int64_t slottimens, runtimens;
+       uint64_t nloops;
+};
+
+static bool test_loop(const struct test_data *data,
+                     const struct test_args *targs,
+                     struct test_result *rbestslottime,
+                     struct test_result *rbestruntime)
+{
+       uint64_t maxslots;
+       struct test_result result;
+
+       result.nloops = 0;
+       if (!test_execute(targs->nslots, &maxslots, targs->seconds, data,
+                         &result.nloops,
+                         &result.slot_runtime, &result.guest_runtime)) {
+               if (maxslots)
+                       pr_info("Memslot count too high for this test, decrease the cap (max is %"PRIu64")\n",
+                               maxslots);
+               else
+                       pr_info("Memslot count may be too high for this test, try adjusting the cap\n");
+
+               return false;
+       }
+
+       pr_info("Test took %ld.%.9lds for slot setup + %ld.%.9lds all iterations\n",
+               result.slot_runtime.tv_sec, result.slot_runtime.tv_nsec,
+               result.guest_runtime.tv_sec, result.guest_runtime.tv_nsec);
+       if (!result.nloops) {
+               pr_info("No full loops done - too short test time or system too loaded?\n");
+               return true;
+       }
+
+       result.iter_runtime = timespec_div(result.guest_runtime,
+                                          result.nloops);
+       pr_info("Done %"PRIu64" iterations, avg %ld.%.9lds each\n",
+               result.nloops,
+               result.iter_runtime.tv_sec,
+               result.iter_runtime.tv_nsec);
+       result.slottimens = timespec_to_ns(result.slot_runtime);
+       result.runtimens = timespec_to_ns(result.iter_runtime);
+
+       /*
+        * Only rank the slot setup time for tests using the whole test memory
+        * area so they are comparable.
+        */
+       if (!data->mem_size &&
+           (!rbestslottime->slottimens ||
+            result.slottimens < rbestslottime->slottimens))
+               *rbestslottime = result;
+       if (!rbestruntime->runtimens ||
+           result.runtimens < rbestruntime->runtimens)
+               *rbestruntime = result;
+
+       return true;
+}
+
+int main(int argc, char *argv[])
+{
+       struct test_args targs = {
+               .tfirst = 0,
+               .tlast = NTESTS - 1,
+               .nslots = -1,
+               .seconds = 5,
+               .runs = 1,
+       };
+       struct test_result rbestslottime;
+       int tctr;
+
+       /* Tell stdout not to buffer its content */
+       setbuf(stdout, NULL);
+
+       if (!parse_args(argc, argv, &targs))
+               return -1;
+
+       rbestslottime.slottimens = 0;
+       for (tctr = targs.tfirst; tctr <= targs.tlast; tctr++) {
+               const struct test_data *data = &tests[tctr];
+               unsigned int runctr;
+               struct test_result rbestruntime;
+
+               if (tctr > targs.tfirst)
+                       pr_info("\n");
+
+               pr_info("Testing %s performance with %i runs, %d seconds each\n",
+                       data->name, targs.runs, targs.seconds);
+
+               rbestruntime.runtimens = 0;
+               for (runctr = 0; runctr < targs.runs; runctr++)
+                       if (!test_loop(data, &targs,
+                                      &rbestslottime, &rbestruntime))
+                               break;
+
+               if (rbestruntime.runtimens)
+                       pr_info("Best runtime result was %ld.%.9lds per iteration (with %"PRIu64" iterations)\n",
+                               rbestruntime.iter_runtime.tv_sec,
+                               rbestruntime.iter_runtime.tv_nsec,
+                               rbestruntime.nloops);
+       }
+
+       if (rbestslottime.slottimens)
+               pr_info("Best slot setup time for the whole test area was %ld.%.9lds\n",
+                       rbestslottime.slot_runtime.tv_sec,
+                       rbestslottime.slot_runtime.tv_nsec);
+
+       return 0;
+}
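[Editor's note: as a hypothetical invocation, ./memslot_perf_test -s 1000 -f 0 -e 2 -l 10 -r 3 would cap the VM at 1000 memslots and run tests 0 through 2 for 10 seconds each, three runs per test, with the best per-iteration time reported at the end.]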
index 9b78e88..8c77537 100644 (file)
@@ -19,7 +19,12 @@ struct {
        u32 function;
        u32 index;
 } mangled_cpuids[] = {
+       /*
+        * These entries depend on the vCPU's XCR0 register and IA32_XSS MSR,
+        * which are not controlled for by this test.
+        */
        {.function = 0xd, .index = 0},
+       {.function = 0xd, .index = 1},
 };
 
 static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid)
index cb953df..8aed0db 100644 (file)
@@ -37,9 +37,7 @@ static void test_get_msr_index(void)
        int old_res, res, kvm_fd, r;
        struct kvm_msr_list *list;
 
-       kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
-       if (kvm_fd < 0)
-               exit(KSFT_SKIP);
+       kvm_fd = open_kvm_dev_path_or_exit();
 
        old_res = kvm_num_index_msrs(kvm_fd, 0);
        TEST_ASSERT(old_res != 0, "Expecting nmsrs to be > 0");
@@ -101,9 +99,7 @@ static void test_get_msr_feature(void)
        int res, old_res, i, kvm_fd;
        struct kvm_msr_list *feature_list;
 
-       kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
-       if (kvm_fd < 0)
-               exit(KSFT_SKIP);
+       kvm_fd = open_kvm_dev_path_or_exit();
 
        old_res = kvm_num_feature_msrs(kvm_fd, 0);
        TEST_ASSERT(old_res != 0, "Expecting nmsrs to be > 0");
index 61ae899..19deb9c 100644 (file)
@@ -30,3 +30,4 @@ hwtstamp_config
 rxtimestamp
 timestamping
 txtimestamp
+so_netns_cookie
index 3915bb7..79c9eb0 100644 (file)
@@ -30,7 +30,7 @@ TEST_GEN_FILES =  socket nettest
 TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
 TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd txring_overwrite
 TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx ip_defrag
-TEST_GEN_FILES += so_txtime ipv6_flowlabel ipv6_flowlabel_mgr
+TEST_GEN_FILES += so_txtime ipv6_flowlabel ipv6_flowlabel_mgr so_netns_cookie
 TEST_GEN_FILES += tcp_fastopen_backup_key
 TEST_GEN_FILES += fin_ack_lat
 TEST_GEN_FILES += reuseaddr_ports_exhausted
index 614d547..6f905b5 100644 (file)
@@ -1,4 +1,5 @@
 CONFIG_USER_NS=y
+CONFIG_NET_NS=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_TEST_BPF=m
 CONFIG_NUMA=y
index 76d9487..5abe92d 100755 (executable)
@@ -1384,12 +1384,37 @@ ipv4_rt_replace()
        ipv4_rt_replace_mpath
 }
 
+# Checks that the cached input route on a VRF port is deleted
+# when the VRF is deleted
+ipv4_local_rt_cache()
+{
+       run_cmd "ip addr add 10.0.0.1/32 dev lo"
+       run_cmd "ip netns add test-ns"
+       run_cmd "ip link add veth-outside type veth peer name veth-inside"
+       run_cmd "ip link add vrf-100 type vrf table 1100"
+       run_cmd "ip link set veth-outside master vrf-100"
+       run_cmd "ip link set veth-inside netns test-ns"
+       run_cmd "ip link set veth-outside up"
+       run_cmd "ip link set vrf-100 up"
+       run_cmd "ip route add 10.1.1.1/32 dev veth-outside table 1100"
+       run_cmd "ip netns exec test-ns ip link set veth-inside up"
+       run_cmd "ip netns exec test-ns ip addr add 10.1.1.1/32 dev veth-inside"
+       run_cmd "ip netns exec test-ns ip route add 10.0.0.1/32 dev veth-inside"
+       run_cmd "ip netns exec test-ns ip route add default via 10.0.0.1"
+       run_cmd "ip netns exec test-ns ping 10.0.0.1 -c 1 -i 1"
+       run_cmd "ip link delete vrf-100"
+
+       # if we do not hang, the test is a success
+       log_test $? 0 "Cached route removed from VRF port device"
+}
+
 ipv4_route_test()
 {
        route_setup
 
        ipv4_rt_add
        ipv4_rt_replace
+       ipv4_local_rt_cache
 
        route_cleanup
 }
diff --git a/tools/testing/selftests/net/icmp.sh b/tools/testing/selftests/net/icmp.sh
new file mode 100755 (executable)
index 0000000..e4b04cd
--- /dev/null
@@ -0,0 +1,74 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for checking ICMP response with dummy address instead of 0.0.0.0.
+# Sets up two namespaces like:
+# +----------------------+                          +--------------------+
+# | ns1                  |    v4-via-v6 routes:     | ns2                |
+# |                      |                          |                    |
+# |             +--------+   -> 172.16.1.0/24 ->    +--------+           |
+# |             | veth0  +--------------------------+  veth0 |           |
+# |             +--------+   <- 172.16.0.0/24 <-    +--------+           |
+# |           172.16.0.1 |                          | 2001:db8:1::2/64   |
+# |     2001:db8:1::1/64 |                          |                    |
+# +----------------------+                          +--------------------+
+#
+# And then tries to ping 172.16.1.1 from ns1. This results in a "net
+# unreachable" message being sent from ns2, but there is no IPv4 address set in
+# that address space, so the kernel should substitute the dummy address
+# 192.0.0.8 defined in RFC7600.
+
+NS1=ns1
+NS2=ns2
+H1_IP=172.16.0.1/32
+H1_IP6=2001:db8:1::1
+RT1=172.16.1.0/24
+PINGADDR=172.16.1.1
+RT2=172.16.0.0/24
+H2_IP6=2001:db8:1::2
+
+TMPFILE=$(mktemp)
+
+cleanup()
+{
+    rm -f "$TMPFILE"
+    ip netns del $NS1
+    ip netns del $NS2
+}
+
+trap cleanup EXIT
+
+# Namespaces
+ip netns add $NS1
+ip netns add $NS2
+
+# Connectivity
+ip -netns $NS1 link add veth0 type veth peer name veth0 netns $NS2
+ip -netns $NS1 link set dev veth0 up
+ip -netns $NS2 link set dev veth0 up
+ip -netns $NS1 addr add $H1_IP dev veth0
+ip -netns $NS1 addr add $H1_IP6/64 dev veth0 nodad
+ip -netns $NS2 addr add $H2_IP6/64 dev veth0 nodad
+ip -netns $NS1 route add $RT1 via inet6 $H2_IP6
+ip -netns $NS2 route add $RT2 via inet6 $H1_IP6
+
+# Make sure ns2 will respond with ICMP unreachable
+ip netns exec $NS2 sysctl -qw net.ipv4.icmp_ratelimit=0 net.ipv4.ip_forward=1
+
+# Run the test - a ping runs in the background, and we capture ICMP responses
+# with tcpdump; -c 1 means it should exit on the first ping, but add a timeout
+# in case something goes wrong
+ip netns exec $NS1 ping -w 3 -i 0.5 $PINGADDR >/dev/null &
+ip netns exec $NS1 timeout 10 tcpdump -tpni veth0 -c 1 'icmp and icmp[icmptype] != icmp-echo' > $TMPFILE 2>/dev/null
+
+# Parse response and check for dummy address
+# tcpdump output looks like:
+# IP 192.0.0.8 > 172.16.0.1: ICMP net 172.16.1.1 unreachable, length 92
+RESP_IP=$(awk '{print $2}' < $TMPFILE)
+if [[ "$RESP_IP" != "192.0.0.8" ]]; then
+    echo "FAIL - got ICMP response from $RESP_IP, should be 192.0.0.8"
+    exit 1
+else
+    echo "OK"
+    exit 0
+fi
index bf361f3..c19ecc6 100755 (executable)
@@ -63,10 +63,14 @@ log_test()
        local rc=$1
        local expected=$2
        local msg="$3"
+       local xfail=$4
 
        if [ ${rc} -eq ${expected} ]; then
                printf "TEST: %-60s  [ OK ]\n" "${msg}"
                nsuccess=$((nsuccess+1))
+       elif [ ${rc} -eq ${xfail} ]; then
+               printf "TEST: %-60s  [XFAIL]\n" "${msg}"
+               nxfail=$((nxfail+1))
        else
                ret=1
                nfail=$((nfail+1))
@@ -322,7 +326,7 @@ check_exception()
                ip -netns h1 -6 ro get ${H1_VRF_ARG} ${H2_N2_IP6} | \
                grep -v "mtu" | grep -q "${R1_LLADDR}"
        fi
-       log_test $? 0 "IPv6: ${desc}"
+       log_test $? 0 "IPv6: ${desc}" 1
 }
 
 run_ping()
@@ -488,6 +492,7 @@ which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
 ret=0
 nsuccess=0
 nfail=0
+nxfail=0
 
 while getopts :pv o
 do
@@ -532,5 +537,6 @@ fi
 
 printf "\nTests passed: %3d\n" ${nsuccess}
 printf "Tests failed: %3d\n"   ${nfail}
+printf "Tests xfailed: %3d\n"  ${nxfail}
 
 exit $ret
index 9ca5f1b..559173a 100755 (executable)
@@ -3,7 +3,7 @@
 
 time_start=$(date +%s)
 
-optstring="S:R:d:e:l:r:h4cm:f:t"
+optstring="S:R:d:e:l:r:h4cm:f:tC"
 ret=0
 sin=""
 sout=""
@@ -22,6 +22,7 @@ sndbuf=0
 rcvbuf=0
 options_log=true
 do_tcp=0
+checksum=false
 filesize=0
 
 if [ $tc_loss -eq 100 ];then
@@ -47,6 +48,7 @@ usage() {
        echo -e "\t-R: set rcvbuf value (default: use kernel default)"
        echo -e "\t-m: test mode (poll, sendfile; default: poll)"
        echo -e "\t-t: also run tests with TCP (use twice to non-fallback tcp)"
+       echo -e "\t-C: enable the MPTCP data checksum"
 }
 
 while getopts "$optstring" option;do
@@ -104,6 +106,9 @@ while getopts "$optstring" option;do
        "t")
                do_tcp=$((do_tcp+1))
                ;;
+       "C")
+               checksum=true
+               ;;
        "?")
                usage $0
                exit 1
@@ -197,8 +202,11 @@ ip -net "$ns4" link set ns4eth3 up
 ip -net "$ns4" route add default via 10.0.3.2
 ip -net "$ns4" route add default via dead:beef:3::2
 
-# use TCP syn cookies, even if no flooding was detected.
-ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2
+if $checksum; then
+       for i in "$ns1" "$ns2" "$ns3" "$ns4";do
+               ip netns exec $i sysctl -q net.mptcp.checksum_enabled=1
+       done
+fi
 
 set_ethtool_flags() {
        local ns="$1"
@@ -672,6 +680,25 @@ run_tests_peekmode()
        run_tests_lo "$ns1" "$ns1" dead:beef:1::1 1 "-P ${peekmode}"
 }
 
+display_time()
+{
+       time_end=$(date +%s)
+       time_run=$((time_end-time_start))
+
+       echo "Time: ${time_run} seconds"
+}
+
+stop_if_error()
+{
+       local msg="$1"
+
+       if [ ${ret} -ne 0 ]; then
+               echo "FAIL: ${msg}" 1>&2
+               display_time
+               exit ${ret}
+       fi
+}
+
 make_file "$cin" "client"
 make_file "$sin" "server"
 
@@ -679,6 +706,8 @@ check_mptcp_disabled
 
 check_mptcp_ulp_setsockopt
 
+stop_if_error "The kernel configuration is not valid for MPTCP"
+
 echo "INFO: validating network environment with pings"
 for sender in "$ns1" "$ns2" "$ns3" "$ns4";do
        do_ping "$ns1" $sender 10.0.1.1
@@ -698,6 +727,8 @@ for sender in "$ns1" "$ns2" "$ns3" "$ns4";do
        do_ping "$ns4" $sender dead:beef:3::1
 done
 
+stop_if_error "Could not even run ping tests"
+
 [ -n "$tc_loss" ] && tc -net "$ns2" qdisc add dev ns2eth3 root netem loss random $tc_loss delay ${tc_delay}ms
 echo -n "INFO: Using loss of $tc_loss "
 test "$tc_delay" -gt 0 && echo -n "delay $tc_delay ms "
@@ -725,18 +756,24 @@ echo "on ns3eth4"
 
 tc -net "$ns3" qdisc add dev ns3eth4 root netem delay ${reorder_delay}ms $tc_reorder
 
+run_tests_lo "$ns1" "$ns1" 10.0.1.1 1
+stop_if_error "Could not even run loopback test"
+
+run_tests_lo "$ns1" "$ns1" dead:beef:1::1 1
+stop_if_error "Could not even run loopback v6 test"
+
 for sender in $ns1 $ns2 $ns3 $ns4;do
-       run_tests_lo "$ns1" "$sender" 10.0.1.1 1
-       if [ $ret -ne 0 ] ;then
-               echo "FAIL: Could not even run loopback test" 1>&2
-               exit $ret
-       fi
-       run_tests_lo "$ns1" $sender dead:beef:1::1 1
-       if [ $ret -ne 0 ] ;then
-               echo "FAIL: Could not even run loopback v6 test" 2>&1
-               exit $ret
+       # ns1<->ns2 is not subject to reordering/tc delays. Use it to test
+       # mptcp syncookie support.
+       if [ $sender = $ns1 ]; then
+               ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2
+       else
+               ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=1
        fi
 
+       run_tests "$ns1" $sender 10.0.1.1
+       run_tests "$ns1" $sender dead:beef:1::1
+
        run_tests "$ns2" $sender 10.0.1.2
        run_tests "$ns2" $sender dead:beef:1::2
        run_tests "$ns2" $sender 10.0.2.1
@@ -749,14 +786,13 @@ for sender in $ns1 $ns2 $ns3 $ns4;do
 
        run_tests "$ns4" $sender 10.0.3.1
        run_tests "$ns4" $sender dead:beef:3::1
+
+       stop_if_error "Tests with $sender as a sender have failed"
 done
 
 run_tests_peekmode "saveWithPeek"
 run_tests_peekmode "saveAfterPeek"
+stop_if_error "Tests with peek mode have failed"
 
-time_end=$(date +%s)
-time_run=$((time_end-time_start))
-
-echo "Time: ${time_run} seconds"
-
+display_time
 exit $ret
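
Moving the syncookie sysctl inside the sender loop above lets a single run cover both modes: net.ipv4.tcp_syncookies=2 makes the listener answer every SYN with a cookie even without flood pressure, while =1 restores the default on-demand behaviour. The ns1<->ns2 leg is used for the unconditional case precisely because it carries no netem loss or reordering, so a failure there points at the MPTCP syncookie path itself. The two states being toggled:

    # unconditional cookies: stresses MPTCP option handling on ns2's listener
    ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2
    # on-demand cookies: only sent when the SYN backlog overflows
    ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=1
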
index fd99485..9a191c1 100755 (executable)
@@ -12,6 +12,7 @@ timeout_poll=30
 timeout_test=$((timeout_poll * 2 + 1))
 mptcp_connect=""
 capture=0
+checksum=0
 do_all_tests=1
 
 TEST_COUNT=0
@@ -49,6 +50,9 @@ init()
                ip netns exec $netns sysctl -q net.mptcp.enabled=1
                ip netns exec $netns sysctl -q net.ipv4.conf.all.rp_filter=0
                ip netns exec $netns sysctl -q net.ipv4.conf.default.rp_filter=0
+               if [ $checksum -eq 1 ]; then
+                       ip netns exec $netns sysctl -q net.mptcp.checksum_enabled=1
+               fi
        done
 
        #  ns1              ns2
@@ -124,6 +128,28 @@ reset_with_add_addr_timeout()
                -j DROP
 }
 
+reset_with_checksum()
+{
+       local ns1_enable=$1
+       local ns2_enable=$2
+
+       reset
+
+       ip netns exec $ns1 sysctl -q net.mptcp.checksum_enabled=$ns1_enable
+       ip netns exec $ns2 sysctl -q net.mptcp.checksum_enabled=$ns2_enable
+}
+
+reset_with_allow_join_id0()
+{
+       local ns1_enable=$1
+       local ns2_enable=$2
+
+       reset
+
+       ip netns exec $ns1 sysctl -q net.mptcp.allow_join_initial_addr_port=$ns1_enable
+       ip netns exec $ns2 sysctl -q net.mptcp.allow_join_initial_addr_port=$ns2_enable
+}
+
 ip -Version > /dev/null 2>&1
 if [ $? -ne 0 ];then
        echo "SKIP: Could not run test without ip tool"
@@ -476,6 +502,45 @@ run_tests()
        fi
 }
 
+chk_csum_nr()
+{
+       local msg=${1:-""}
+       local count
+       local dump_stats
+
+       if [ ! -z "$msg" ]; then
+               printf "%02u" "$TEST_COUNT"
+       else
+               echo -n "  "
+       fi
+       printf " %-36s %s" "$msg" "sum"
+       count=`ip netns exec $ns1 nstat -as | grep MPTcpExtDataCsumErr | awk '{print $2}'`
+       [ -z "$count" ] && count=0
+       if [ "$count" != 0 ]; then
+               echo "[fail] got $count data checksum error[s] expected 0"
+               ret=1
+               dump_stats=1
+       else
+               echo -n "[ ok ]"
+       fi
+       echo -n " - csum  "
+       count=`ip netns exec $ns2 nstat -as | grep MPTcpExtDataCsumErr | awk '{print $2}'`
+       [ -z "$count" ] && count=0
+       if [ "$count" != 0 ]; then
+               echo "[fail] got $count data checksum error[s] expected 0"
+               ret=1
+               dump_stats=1
+       else
+               echo "[ ok ]"
+       fi
+       if [ "${dump_stats}" = 1 ]; then
+               echo Server ns stats
+               ip netns exec $ns1 nstat -as | grep MPTcp
+               echo Client ns stats
+               ip netns exec $ns2 nstat -as | grep MPTcp
+       fi
+}
+
 chk_join_nr()
 {
        local msg="$1"
@@ -523,6 +588,9 @@ chk_join_nr()
                echo Client ns stats
                ip netns exec $ns2 nstat -as | grep MPTcp
        fi
+       if [ $checksum -eq 1 ]; then
+               chk_csum_nr
+       fi
 }
 
 chk_add_nr()
@@ -1374,6 +1442,94 @@ syncookies_tests()
        chk_add_nr 1 1
 }
 
+checksum_tests()
+{
+       # checksum test 0 0
+       reset_with_checksum 0 0
+       ip netns exec $ns1 ./pm_nl_ctl limits 0 1
+       ip netns exec $ns2 ./pm_nl_ctl limits 0 1
+       run_tests $ns1 $ns2 10.0.1.1
+       chk_csum_nr "checksum test 0 0"
+
+       # checksum test 1 1
+       reset_with_checksum 1 1
+       ip netns exec $ns1 ./pm_nl_ctl limits 0 1
+       ip netns exec $ns2 ./pm_nl_ctl limits 0 1
+       run_tests $ns1 $ns2 10.0.1.1
+       chk_csum_nr "checksum test 1 1"
+
+       # checksum test 0 1
+       reset_with_checksum 0 1
+       ip netns exec $ns1 ./pm_nl_ctl limits 0 1
+       ip netns exec $ns2 ./pm_nl_ctl limits 0 1
+       run_tests $ns1 $ns2 10.0.1.1
+       chk_csum_nr "checksum test 0 1"
+
+       # checksum test 1 0
+       reset_with_checksum 1 0
+       ip netns exec $ns1 ./pm_nl_ctl limits 0 1
+       ip netns exec $ns2 ./pm_nl_ctl limits 0 1
+       run_tests $ns1 $ns2 10.0.1.1
+       chk_csum_nr "checksum test 1 0"
+}
+
+deny_join_id0_tests()
+{
+       # subflow allow join id0 ns1
+       reset_with_allow_join_id0 1 0
+       ip netns exec $ns1 ./pm_nl_ctl limits 1 1
+       ip netns exec $ns2 ./pm_nl_ctl limits 1 1
+       ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+       run_tests $ns1 $ns2 10.0.1.1
+       chk_join_nr "single subflow allow join id0 ns1" 1 1 1
+
+       # subflow allow join id0 ns2
+       reset_with_allow_join_id0 0 1
+       ip netns exec $ns1 ./pm_nl_ctl limits 1 1
+       ip netns exec $ns2 ./pm_nl_ctl limits 1 1
+       ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+       run_tests $ns1 $ns2 10.0.1.1
+       chk_join_nr "single subflow allow join id0 ns2" 0 0 0
+
+       # signal address allow join id0 ns1
+       # ADD_ADDRs are not affected by allow_join_id0 value.
+       reset_with_allow_join_id0 1 0
+       ip netns exec $ns1 ./pm_nl_ctl limits 1 1
+       ip netns exec $ns2 ./pm_nl_ctl limits 1 1
+       ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+       run_tests $ns1 $ns2 10.0.1.1
+       chk_join_nr "signal address allow join id0 ns1" 1 1 1
+       chk_add_nr 1 1
+
+       # signal address allow join id0 ns2
+       # ADD_ADDRs are not affected by allow_join_id0 value.
+       reset_with_allow_join_id0 0 1
+       ip netns exec $ns1 ./pm_nl_ctl limits 1 1
+       ip netns exec $ns2 ./pm_nl_ctl limits 1 1
+       ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+       run_tests $ns1 $ns2 10.0.1.1
+       chk_join_nr "signal address allow join id0 ns2" 1 1 1
+       chk_add_nr 1 1
+
+       # subflow and address allow join id0 ns1
+       reset_with_allow_join_id0 1 0
+       ip netns exec $ns1 ./pm_nl_ctl limits 2 2
+       ip netns exec $ns2 ./pm_nl_ctl limits 2 2
+       ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+       ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+       run_tests $ns1 $ns2 10.0.1.1
+       chk_join_nr "subflow and address allow join id0 1" 2 2 2
+
+       # subflow and address allow join id0 ns2
+       reset_with_allow_join_id0 0 1
+       ip netns exec $ns1 ./pm_nl_ctl limits 2 2
+       ip netns exec $ns2 ./pm_nl_ctl limits 2 2
+       ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+       ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+       run_tests $ns1 $ns2 10.0.1.1
+       chk_join_nr "subflow and address allow join id0 2" 1 1 1
+}
+
 all_tests()
 {
        subflows_tests
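
The deny_join_id0 cases hinge on net.mptcp.allow_join_initial_addr_port: when the listening side sets it to 0, the peer is told during the MP_CAPABLE handshake not to open additional subflows towards the initial address and port (address ID 0), which is why the 0/1 subflow case above expects zero joins, while ADD_ADDR announcements of other addresses are unaffected, as the comments note. The asymmetric setup reduces to this sketch, using only the sysctl introduced in this diff:

    # the server refuses joins towards its initial address/port ...
    ip netns exec "$ns1" sysctl -q net.mptcp.allow_join_initial_addr_port=0
    # ... while the client side keeps the permissive default
    ip netns exec "$ns2" sysctl -q net.mptcp.allow_join_initial_addr_port=1
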
@@ -1387,6 +1543,8 @@ all_tests()
        backup_tests
        add_addr_ports_tests
        syncookies_tests
+       checksum_tests
+       deny_join_id0_tests
 }
 
 usage()
@@ -1403,7 +1561,10 @@ usage()
        echo "  -b backup_tests"
        echo "  -p add_addr_ports_tests"
        echo "  -k syncookies_tests"
+       echo "  -S checksum_tests"
+       echo "  -d deny_join_id0_tests"
        echo "  -c capture pcap files"
+       echo "  -C enable data checksum"
        echo "  -h help"
 }
 
@@ -1418,13 +1579,16 @@ make_file "$sin" "server" 1
 trap cleanup EXIT
 
 for arg in "$@"; do
-       # check for "capture" arg before launching tests
+       # check for "capture/checksum" args before launching tests
        if [[ "${arg}" =~ ^"-"[0-9a-zA-Z]*"c"[0-9a-zA-Z]*$ ]]; then
                capture=1
        fi
+       if [[ "${arg}" =~ ^"-"[0-9a-zA-Z]*"C"[0-9a-zA-Z]*$ ]]; then
+               checksum=1
+       fi
 
-       # exception for the capture option, the rest means: a part of the tests
-       if [ "${arg}" != "-c" ]; then
+       # exception for the capture/checksum options; any other argument selects a subset of the tests
+       if [ "${arg}" != "-c" ] && [ "${arg}" != "-C" ]; then
                do_all_tests=0
        fi
 done
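
The pre-scan uses a regex rather than a plain string compare because short options can be bundled into one argument; the pattern accepts the letter anywhere inside a single dash-prefixed group. A standalone illustration reusing the diff's own pattern (hypothetical argument value):

    arg="-Cf"    # checksums enabled plus one test group, bundled together
    if [[ "${arg}" =~ ^"-"[0-9a-zA-Z]*"C"[0-9a-zA-Z]*$ ]]; then
            echo "checksum mode requested"
    fi
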
@@ -1434,7 +1598,7 @@ if [ $do_all_tests -eq 1 ]; then
        exit $ret
 fi
 
-while getopts 'fsltra64bpkch' opt; do
+while getopts 'fsltra64bpkdchCS' opt; do
        case $opt in
                f)
                        subflows_tests
@@ -1469,8 +1633,16 @@ while getopts 'fsltra64bpkch' opt; do
                k)
                        syncookies_tests
                        ;;
+               S)
+                       checksum_tests
+                       ;;
+               d)
+                       deny_join_id0_tests
+                       ;;
                c)
                        ;;
+               C)
+                       ;;
                h | *)
                        usage
                        ;;
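
With the selectors wired into getopts, the new groups can be run in isolation. The file name is elided by this diff view, so the invocation below assumes the MPTCP join selftest script being patched here:

    # hypothetical invocation; -S, -d and -C come from the usage() text above
    ./mptcp_join.sh -S    # checksum_tests only
    ./mptcp_join.sh -d    # deny_join_id0_tests only
    ./mptcp_join.sh -C    # full run with data checksums enabled
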
index 3aeef3b..fd63ebf 100755 (executable)
@@ -60,6 +60,8 @@ setup()
        for i in "$ns1" "$ns2" "$ns3";do
                ip netns add $i || exit $ksft_skip
                ip -net $i link set lo up
+               ip netns exec $i sysctl -q net.ipv4.conf.all.rp_filter=0
+               ip netns exec $i sysctl -q net.ipv4.conf.default.rp_filter=0
        done
 
        ip link add ns1eth1 netns "$ns1" type veth peer name ns2eth1 netns "$ns2"
@@ -80,7 +82,6 @@ setup()
 
        ip netns exec "$ns1" ./pm_nl_ctl limits 1 1
        ip netns exec "$ns1" ./pm_nl_ctl add 10.0.2.1 dev ns1eth2 flags subflow
-       ip netns exec "$ns1" sysctl -q net.ipv4.conf.all.rp_filter=0
 
        ip -net "$ns2" addr add 10.0.1.2/24 dev ns2eth1
        ip -net "$ns2" addr add dead:beef:1::2/64 dev ns2eth1 nodad
diff --git a/tools/testing/selftests/net/so_netns_cookie.c b/tools/testing/selftests/net/so_netns_cookie.c
new file mode 100644 (file)
index 0000000..b39e87e
--- /dev/null
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <sched.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#ifndef SO_NETNS_COOKIE
+#define SO_NETNS_COOKIE 71
+#endif
+
+#define pr_err(fmt, ...) \
+       ({ \
+               fprintf(stderr, "%s:%d:" fmt ": %m\n", \
+                       __func__, __LINE__, ##__VA_ARGS__); \
+               1; \
+       })
+
+int main(int argc, char *argv[])
+{
+       uint64_t cookie1, cookie2;
+       socklen_t vallen;
+       int sock1, sock2;
+
+       sock1 = socket(AF_INET, SOCK_STREAM, 0);
+       if (sock1 < 0)
+               return pr_err("Unable to create TCP socket");
+
+       vallen = sizeof(cookie1);
+       if (getsockopt(sock1, SOL_SOCKET, SO_NETNS_COOKIE, &cookie1, &vallen) != 0)
+               return pr_err("getsockopt(SOL_SOCKET, SO_NETNS_COOKIE)");
+
+       if (!cookie1)
+               return pr_err("SO_NETNS_COOKIE returned zero cookie");
+
+       if (unshare(CLONE_NEWNET))
+               return pr_err("unshare");
+
+       sock2 = socket(AF_INET, SOCK_STREAM, 0);
+       if (sock2 < 0)
+               return pr_err("Unable to create TCP socket");
+
+       vallen = sizeof(cookie2);
+       if (getsockopt(sock2, SOL_SOCKET, SO_NETNS_COOKIE, &cookie2, &vallen) != 0)
+               return pr_err("getsockopt(SOL_SOCKET, SO_NETNS_COOKIE)");
+
+       if (!cookie2)
+               return pr_err("SO_NETNS_COOKIE returned zero cookie");
+
+       if (cookie1 == cookie2)
+               return pr_err("SO_NETNS_COOKIE returned identical cookies for distinct ns");
+
+       close(sock1);
+       close(sock2);
+       return 0;
+}
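
The test passes only when both cookies are non-zero and differ across the unshare(CLONE_NEWNET) boundary. A plausible way to build and run it standalone; unshare() needs CAP_SYS_ADMIN, hence the sudo:

    gcc -o so_netns_cookie tools/testing/selftests/net/so_netns_cookie.c
    sudo ./so_netns_cookie && echo "distinct per-netns cookies confirmed"
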
diff --git a/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh b/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh
new file mode 100755 (executable)
index 0000000..75ada17
--- /dev/null
@@ -0,0 +1,573 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# author: Andrea Mayer <andrea.mayer@uniroma2.it>
+# author: Paolo Lungaroni <paolo.lungaroni@uniroma2.it>
+
+# This test is designed for evaluating the new SRv6 End.DT46 Behavior used for
+# implementing IPv4/IPv6 L3 VPN use cases.
+#
+# The current SRv6 code in the Linux kernel only implements SRv6 End.DT4 and
+# End.DT6 Behaviors, which can be used respectively to support IPv4-in-IPv6 and
+# IPv6-in-IPv6 VPNs. With End.DT4 and End.DT6 it is not possible to create a
+# single SRv6 VPN tunnel to carry both IPv4 and IPv6 traffic.
+# The SRv6 End.DT46 Behavior implementation is meant to support the
+# decapsulation of IPv4 and IPv6 traffic coming from a single SRv6 tunnel.
+# Therefore, the SRv6 End.DT46 Behavior in the Linux kernel greatly simplifies
+# the setup and operations of SRv6 VPNs.
+#
+# Hereafter a network diagram is shown, where two different tenants (named 100
+# and 200) offer IPv4/IPv6 L3 VPN services allowing hosts to communicate with
+# each other across an IPv6 network.
+#
+# Only hosts belonging to the same tenant (and to the same VPN) can communicate
+# with each other; communication among hosts of different tenants is
+# forbidden.
+# In other words, hosts hs-t100-1 and hs-t100-2 are connected through the
+# IPv4/IPv6 L3 VPN of tenant 100 while hs-t200-3 and hs-t200-4 are connected
+# using the IPv4/IPv6 L3 VPN of tenant 200. Cross connection between tenant 100
+# and tenant 200 is forbidden and thus, for example, hs-t100-1 cannot reach
+# hs-t200-3 and vice versa.
+#
+# Routers rt-1 and rt-2 implement IPv4/IPv6 L3 VPN services leveraging the SRv6
+# architecture. The key components for such VPNs are: a) SRv6 Encap behavior,
+# b) SRv6 End.DT46 Behavior and c) VRF.
+#
+# To explain how an IPv4/IPv6 L3 VPN based on SRv6 works, let us briefly
+# consider an example where, within the same domain of tenant 100, the host
+# hs-t100-1 pings the host hs-t100-2.
+#
+# First of all, L2 reachability of the host hs-t100-2 is taken into account by
+# the router rt-1, which acts as an arp/ndp proxy.
+#
+# When the host hs-t100-1 sends an IPv6 or IPv4 packet destined to hs-t100-2,
+# the router rt-1 receives the packet on the internal veth-t100 interface. This
+# interface is enslaved to the VRF vrf-100, whose associated table contains the
+# SRv6 Encap route for encapsulating any IPv6 or IPv4 packet in an outer IPv6
+# packet carrying the Segment Routing Header (SRH). This packet is sent through
+# the (IPv6) core network up to the router rt-2, which receives it on its veth0
+# interface.
+#
+# The rt-2 router uses the 'localsid' routing table to process incoming
+# IPv6+SRH packets which belong to the VPN of the tenant 100. For each of these
+# packets, the SRv6 End.DT46 Behavior removes the outer IPv6+SRH headers and
+# performs the lookup on the vrf-100 table using the destination address of
+# the decapsulated IPv6 or IPv4 packet. Afterwards, the packet is sent to the
+# host hs-t100-2 through the veth-t100 interface.
+#
+# The ping response follows the same processing but this time the roles of rt-1
+# and rt-2 are swapped.
+#
+# Of course, the IPv4/IPv6 L3 VPN for tenant 200 works exactly as the IPv4/IPv6
+# L3 VPN for tenant 100. In this case, only hosts hs-t200-3 and hs-t200-4 are
+# able to connect with each other.
+#
+#
+# +-------------------+                                   +-------------------+
+# |                   |                                   |                   |
+# |  hs-t100-1 netns  |                                   |  hs-t100-2 netns  |
+# |                   |                                   |                   |
+# |  +-------------+  |                                   |  +-------------+  |
+# |  |    veth0    |  |                                   |  |    veth0    |  |
+# |  |  cafe::1/64 |  |                                   |  |  cafe::2/64 |  |
+# |  | 10.0.0.1/24 |  |                                   |  | 10.0.0.2/24 |  |
+# |  +-------------+  |                                   |  +-------------+  |
+# |        .          |                                   |         .         |
+# +-------------------+                                   +-------------------+
+#          .                                                        .
+#          .                                                        .
+#          .                                                        .
+# +-----------------------------------+   +-----------------------------------+
+# |        .                          |   |                         .         |
+# | +---------------+                 |   |                 +---------------+ |
+# | |   veth-t100   |                 |   |                 |   veth-t100   | |
+# | |  cafe::254/64 |                 |   |                 |  cafe::254/64 | |
+# | | 10.0.0.254/24 |    +----------+ |   | +----------+    | 10.0.0.254/24 | |
+# | +-------+-------+    | localsid | |   | | localsid |    +-------+-------+ |
+# |         |            |   table  | |   | |   table  |            |         |
+# |    +----+----+       +----------+ |   | +----------+       +----+----+    |
+# |    | vrf-100 |                    |   |                    | vrf-100 |    |
+# |    +---------+     +------------+ |   | +------------+     +---------+    |
+# |                    |   veth0    | |   | |   veth0    |                    |
+# |                    | fd00::1/64 |.|...|.| fd00::2/64 |                    |
+# |    +---------+     +------------+ |   | +------------+     +---------+    |
+# |    | vrf-200 |                    |   |                    | vrf-200 |    |
+# |    +----+----+                    |   |                    +----+----+    |
+# |         |                         |   |                         |         |
+# | +-------+-------+                 |   |                 +-------+-------+ |
+# | |   veth-t200   |                 |   |                 |   veth-t200   | |
+# | |  cafe::254/64 |                 |   |                 |  cafe::254/64 | |
+# | | 10.0.0.254/24 |                 |   |                 | 10.0.0.254/24 | |
+# | +---------------+      rt-1 netns |   | rt-2 netns      +---------------+ |
+# |        .                          |   |                          .        |
+# +-----------------------------------+   +-----------------------------------+
+#          .                                                         .
+#          .                                                         .
+#          .                                                         .
+#          .                                                         .
+# +-------------------+                                   +-------------------+
+# |        .          |                                   |          .        |
+# |  +-------------+  |                                   |  +-------------+  |
+# |  |    veth0    |  |                                   |  |    veth0    |  |
+# |  |  cafe::3/64 |  |                                   |  |  cafe::4/64 |  |
+# |  | 10.0.0.3/24 |  |                                   |  | 10.0.0.4/24 |  |
+# |  +-------------+  |                                   |  +-------------+  |
+# |                   |                                   |                   |
+# |  hs-t200-3 netns  |                                   |  hs-t200-4 netns  |
+# |                   |                                   |                   |
+# +-------------------+                                   +-------------------+
+#
+#
+# ~~~~~~~~~~~~~~~~~~~~~~~~~
+# | Network configuration |
+# ~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# rt-1: localsid table (table 90)
+# +--------------------------------------------------+
+# |SID              |Action                          |
+# +--------------------------------------------------+
+# |fc00:21:100::6046|apply SRv6 End.DT46 vrftable 100|
+# +--------------------------------------------------+
+# |fc00:21:200::6046|apply SRv6 End.DT46 vrftable 200|
+# +--------------------------------------------------+
+#
+# rt-1: VRF tenant 100 (table 100)
+# +---------------------------------------------------+
+# |host       |Action                                 |
+# +---------------------------------------------------+
+# |cafe::2    |apply seg6 encap segs fc00:12:100::6046|
+# +---------------------------------------------------+
+# |cafe::/64  |forward to dev veth-t100               |
+# +---------------------------------------------------+
+# |10.0.0.2   |apply seg6 encap segs fc00:12:100::6046|
+# +---------------------------------------------------+
+# |10.0.0.0/24|forward to dev veth-t100               |
+# +---------------------------------------------------+
+#
+# rt-1: VRF tenant 200 (table 200)
+# +---------------------------------------------------+
+# |host       |Action                                 |
+# +---------------------------------------------------+
+# |cafe::4    |apply seg6 encap segs fc00:12:200::6046|
+# +---------------------------------------------------+
+# |cafe::/64  |forward to dev veth-t200               |
+# +---------------------------------------------------+
+# |10.0.0.4   |apply seg6 encap segs fc00:12:200::6046|
+# +---------------------------------------------------+
+# |10.0.0.0/24|forward to dev veth-t200               |
+# +---------------------------------------------------+
+#
+#
+# rt-2: localsid table (table 90)
+# +--------------------------------------------------+
+# |SID              |Action                          |
+# +--------------------------------------------------+
+# |fc00:12:100::6046|apply SRv6 End.DT46 vrftable 100|
+# +--------------------------------------------------+
+# |fc00:12:200::6046|apply SRv6 End.DT46 vrftable 200|
+# +--------------------------------------------------+
+#
+# rt-2: VRF tenant 100 (table 100)
+# +---------------------------------------------------+
+# |host       |Action                                 |
+# +---------------------------------------------------+
+# |cafe::1    |apply seg6 encap segs fc00:21:100::6046|
+# +---------------------------------------------------+
+# |cafe::/64  |forward to dev veth-t100               |
+# +---------------------------------------------------+
+# |10.0.0.1   |apply seg6 encap segs fc00:21:100::6046|
+# +---------------------------------------------------+
+# |10.0.0.0/24|forward to dev veth-t100               |
+# +---------------------------------------------------+
+#
+# rt-2: VRF tenant 200 (table 200)
+# +---------------------------------------------------+
+# |host       |Action                                 |
+# +---------------------------------------------------+
+# |cafe::3    |apply seg6 encap segs fc00:21:200::6046|
+# +---------------------------------------------------+
+# |cafe::/64  |forward to dev veth-t200               |
+# +---------------------------------------------------+
+# |10.0.0.3   |apply seg6 encap segs fc00:21:200::6046|
+# +---------------------------------------------------+
+# |10.0.0.0/24|forward to dev veth-t200               |
+# +---------------------------------------------------+
+#
+
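
Two route types implement the data path described above. Filling the templates from setup_vpn_config() below with the tenant-100 values shown in the tables gives, as a sketch:

    # on rt-1 (encap): steer tenant traffic for cafe::2 into the SRv6 tunnel
    ip -6 route add cafe::2/128 vrf vrf-100 \
            encap seg6 mode encap segs fc00:12:100::6046 dev veth0
    # on rt-2 (decap): End.DT46 strips the outer IPv6+SRH and looks the inner
    # IPv4/IPv6 destination up in the tenant VRF (localsid table 90)
    ip -6 route add fc00:12:100::6046/128 table 90 \
            encap seg6local action End.DT46 vrftable 100 dev vrf-100
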
+readonly LOCALSID_TABLE_ID=90
+readonly IPv6_RT_NETWORK=fd00
+readonly IPv6_HS_NETWORK=cafe
+readonly IPv4_HS_NETWORK=10.0.0
+readonly VPN_LOCATOR_SERVICE=fc00
+PING_TIMEOUT_SEC=4
+
+ret=0
+
+PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
+
+log_test()
+{
+       local rc=$1
+       local expected=$2
+       local msg="$3"
+
+       if [ ${rc} -eq ${expected} ]; then
+               nsuccess=$((nsuccess+1))
+               printf "\n    TEST: %-60s  [ OK ]\n" "${msg}"
+       else
+               ret=1
+               nfail=$((nfail+1))
+               printf "\n    TEST: %-60s  [FAIL]\n" "${msg}"
+               if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+                       echo
+                       echo "hit enter to continue, 'q' to quit"
+                       read a
+                       [ "$a" = "q" ] && exit 1
+               fi
+       fi
+}
+
+print_log_test_results()
+{
+       if [ "$TESTS" != "none" ]; then
+               printf "\nTests passed: %3d\n" ${nsuccess}
+               printf "Tests failed: %3d\n"   ${nfail}
+       fi
+}
+
+log_section()
+{
+       echo
+       echo "################################################################################"
+       echo "TEST SECTION: $*"
+       echo "################################################################################"
+}
+
+cleanup()
+{
+       ip link del veth-rt-1 2>/dev/null || true
+       ip link del veth-rt-2 2>/dev/null || true
+
+       # destroy routers rt-* and hosts hs-*
+       for ns in $(ip netns show | grep -E 'rt-*|hs-*'); do
+               ip netns del ${ns} || true
+       done
+}
+
+# Setup the basic networking for the routers
+setup_rt_networking()
+{
+       local rt=$1
+       local nsname=rt-${rt}
+
+       ip netns add ${nsname}
+       ip link set veth-rt-${rt} netns ${nsname}
+       ip -netns ${nsname} link set veth-rt-${rt} name veth0
+
+       ip netns exec ${nsname} sysctl -wq net.ipv6.conf.all.accept_dad=0
+       ip netns exec ${nsname} sysctl -wq net.ipv6.conf.default.accept_dad=0
+
+       ip -netns ${nsname} addr add ${IPv6_RT_NETWORK}::${rt}/64 dev veth0 nodad
+       ip -netns ${nsname} link set veth0 up
+       ip -netns ${nsname} link set lo up
+
+       ip netns exec ${nsname} sysctl -wq net.ipv4.ip_forward=1
+       ip netns exec ${nsname} sysctl -wq net.ipv6.conf.all.forwarding=1
+}
+
+setup_hs()
+{
+       local hs=$1
+       local rt=$2
+       local tid=$3
+       local hsname=hs-t${tid}-${hs}
+       local rtname=rt-${rt}
+       local rtveth=veth-t${tid}
+
+       # set the networking for the host
+       ip netns add ${hsname}
+
+       ip netns exec ${hsname} sysctl -wq net.ipv6.conf.all.accept_dad=0
+       ip netns exec ${hsname} sysctl -wq net.ipv6.conf.default.accept_dad=0
+
+       ip -netns ${hsname} link add veth0 type veth peer name ${rtveth}
+       ip -netns ${hsname} link set ${rtveth} netns ${rtname}
+       ip -netns ${hsname} addr add ${IPv6_HS_NETWORK}::${hs}/64 dev veth0 nodad
+       ip -netns ${hsname} addr add ${IPv4_HS_NETWORK}.${hs}/24 dev veth0
+       ip -netns ${hsname} link set veth0 up
+       ip -netns ${hsname} link set lo up
+
+       # configure the VRF for the tenant X on the router which is directly
+       # connected to the source host.
+       ip -netns ${rtname} link add vrf-${tid} type vrf table ${tid}
+       ip -netns ${rtname} link set vrf-${tid} up
+
+       ip netns exec ${rtname} sysctl -wq net.ipv6.conf.all.accept_dad=0
+       ip netns exec ${rtname} sysctl -wq net.ipv6.conf.default.accept_dad=0
+
+       # enslave the veth-tX interface to the vrf-X in the access router
+       ip -netns ${rtname} link set ${rtveth} master vrf-${tid}
+       ip -netns ${rtname} addr add ${IPv6_HS_NETWORK}::254/64 dev ${rtveth} nodad
+       ip -netns ${rtname} addr add ${IPv4_HS_NETWORK}.254/24 dev ${rtveth}
+       ip -netns ${rtname} link set ${rtveth} up
+
+       ip netns exec ${rtname} sysctl -wq net.ipv6.conf.${rtveth}.proxy_ndp=1
+       ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.proxy_arp=1
+
+       # disable the rp_filter otherwise the kernel gets confused about how
+       # to route decap ipv4 packets.
+       ip netns exec ${rtname} sysctl -wq net.ipv4.conf.all.rp_filter=0
+       ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.rp_filter=0
+
+       ip netns exec ${rtname} sh -c "echo 1 > /proc/sys/net/vrf/strict_mode"
+}
+
+setup_vpn_config()
+{
+       local hssrc=$1
+       local rtsrc=$2
+       local hsdst=$3
+       local rtdst=$4
+       local tid=$5
+
+       local hssrc_name=hs-t${tid}-${hssrc}
+       local hsdst_name=hs-t${tid}-${hsdst}
+       local rtsrc_name=rt-${rtsrc}
+       local rtdst_name=rt-${rtdst}
+       local rtveth=veth-t${tid}
+       local vpn_sid=${VPN_LOCATOR_SERVICE}:${hssrc}${hsdst}:${tid}::6046
+
+       ip -netns ${rtsrc_name} -6 neigh add proxy ${IPv6_HS_NETWORK}::${hsdst} dev ${rtveth}
+
+       # set the encap route on the access router rtsrc for encapsulating
+       # packets which arrive from the host hssrc and are destined to the
+       # remote host hsdst.
+       ip -netns ${rtsrc_name} -6 route add ${IPv6_HS_NETWORK}::${hsdst}/128 vrf vrf-${tid} \
+               encap seg6 mode encap segs ${vpn_sid} dev veth0
+       ip -netns ${rtsrc_name} -4 route add ${IPv4_HS_NETWORK}.${hsdst}/32 vrf vrf-${tid} \
+               encap seg6 mode encap segs ${vpn_sid} dev veth0
+       ip -netns ${rtsrc_name} -6 route add ${vpn_sid}/128 vrf vrf-${tid} \
+               via fd00::${rtdst} dev veth0
+
+       # set the decap route for decapsulating packets which arrive at the
+       # rtdst router and are destined to the hsdst host.
+       ip -netns ${rtdst_name} -6 route add ${vpn_sid}/128 table ${LOCALSID_TABLE_ID} \
+               encap seg6local action End.DT46 vrftable ${tid} dev vrf-${tid}
+
+       # all sids for VPNs start with a common locator which is fc00::/16.
+       # Routes for handling the SRv6 End.DT46 behavior instances are grouped
+       # together in the 'localsid' table.
+       #
+       # NOTE: added only once
+       if [ -z "$(ip -netns ${rtdst_name} -6 rule show | \
+           grep "to ${VPN_LOCATOR_SERVICE}::/16 lookup ${LOCALSID_TABLE_ID}")" ]; then
+               ip -netns ${rtdst_name} -6 rule add \
+                       to ${VPN_LOCATOR_SERVICE}::/16 \
+                       lookup ${LOCALSID_TABLE_ID} prio 999
+       fi
+
+       # set default routes to unreachable for both ipv4 and ipv6
+       ip -netns ${rtsrc_name} -6 route add unreachable default metric 4278198272 \
+               vrf vrf-${tid}
+
+       ip -netns ${rtsrc_name} -4 route add unreachable default metric 4278198272 \
+               vrf vrf-${tid}
+}
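
The unreachable defaults at the end are the usual VRF guard: the metric is high enough that any real VPN route wins, while traffic with no VPN match is rejected inside the VRF instead of leaking to another table. The result can be inspected directly:

    # both families should list the VPN routes plus an unreachable default
    ip -netns rt-1 -6 route show vrf vrf-100
    ip -netns rt-1 -4 route show vrf vrf-100
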
+
+setup()
+{
+       ip link add veth-rt-1 type veth peer name veth-rt-2
+       # setup the networking for router rt-1 and router rt-2
+       setup_rt_networking 1
+       setup_rt_networking 2
+
+       # setup two hosts for the tenant 100.
+       #  - host hs-1 is directly connected to the router rt-1;
+       #  - host hs-2 is directly connected to the router rt-2.
+       setup_hs 1 1 100  #args: host router tenant
+       setup_hs 2 2 100
+
+       # setup two hosts for the tenant 200
+       #  - host hs-3 is directly connected to the router rt-1;
+       #  - host hs-4 is directly connected to the router rt-2.
+       setup_hs 3 1 200
+       setup_hs 4 2 200
+
+       # setup the IPv4/IPv6 L3 VPN which connects the host hs-t100-1 and host
+       # hs-t100-2 within the same tenant 100.
+       setup_vpn_config 1 1 2 2 100  #args: src_host src_router dst_host dst_router tenant
+       setup_vpn_config 2 2 1 1 100
+
+       # setup the IPv4/IPv6 L3 VPN which connects the host hs-t200-3 and host
+       # hs-t200-4 within the same tenant 200.
+       setup_vpn_config 3 1 4 2 200
+       setup_vpn_config 4 2 3 1 200
+}
+
+check_rt_connectivity()
+{
+       local rtsrc=$1
+       local rtdst=$2
+
+       ip netns exec rt-${rtsrc} ping -c 1 -W 1 ${IPv6_RT_NETWORK}::${rtdst} \
+               >/dev/null 2>&1
+}
+
+check_and_log_rt_connectivity()
+{
+       local rtsrc=$1
+       local rtdst=$2
+
+       check_rt_connectivity ${rtsrc} ${rtdst}
+       log_test $? 0 "Routers connectivity: rt-${rtsrc} -> rt-${rtdst}"
+}
+
+check_hs_ipv6_connectivity()
+{
+       local hssrc=$1
+       local hsdst=$2
+       local tid=$3
+
+       ip netns exec hs-t${tid}-${hssrc} ping -c 1 -W ${PING_TIMEOUT_SEC} \
+               ${IPv6_HS_NETWORK}::${hsdst} >/dev/null 2>&1
+}
+
+check_hs_ipv4_connectivity()
+{
+       local hssrc=$1
+       local hsdst=$2
+       local tid=$3
+
+       ip netns exec hs-t${tid}-${hssrc} ping -c 1 -W ${PING_TIMEOUT_SEC} \
+               ${IPv4_HS_NETWORK}.${hsdst} >/dev/null 2>&1
+}
+
+check_and_log_hs_connectivity()
+{
+       local hssrc=$1
+       local hsdst=$2
+       local tid=$3
+
+       check_hs_ipv6_connectivity ${hssrc} ${hsdst} ${tid}
+       log_test $? 0 "IPv6 Hosts connectivity: hs-t${tid}-${hssrc} -> hs-t${tid}-${hsdst} (tenant ${tid})"
+
+       check_hs_ipv4_connectivity ${hssrc} ${hsdst} ${tid}
+       log_test $? 0 "IPv4 Hosts connectivity: hs-t${tid}-${hssrc} -> hs-t${tid}-${hsdst} (tenant ${tid})"
+
+}
+
+check_and_log_hs_isolation()
+{
+       local hssrc=$1
+       local tidsrc=$2
+       local hsdst=$3
+       local tiddst=$4
+
+       check_hs_ipv6_connectivity ${hssrc} ${hsdst} ${tidsrc}
+       # NOTE: ping should fail
+       log_test $? 1 "IPv6 Hosts isolation: hs-t${tidsrc}-${hssrc} -X-> hs-t${tiddst}-${hsdst}"
+
+       check_hs_ipv4_connectivity ${hssrc} ${hsdst} ${tidsrc}
+       # NOTE: ping should fail
+       log_test $? 1 "IPv4 Hosts isolation: hs-t${tidsrc}-${hssrc} -X-> hs-t${tiddst}-${hsdst}"
+
+}
+
+
+check_and_log_hs2gw_connectivity()
+{
+       local hssrc=$1
+       local tid=$2
+
+       check_hs_ipv6_connectivity ${hssrc} 254 ${tid}
+       log_test $? 0 "IPv6 Hosts connectivity: hs-t${tid}-${hssrc} -> gw (tenant ${tid})"
+
+       check_hs_ipv4_connectivity ${hssrc} 254 ${tid}
+       log_test $? 0 "IPv4 Hosts connectivity: hs-t${tid}-${hssrc} -> gw (tenant ${tid})"
+
+}
+
+router_tests()
+{
+       log_section "IPv6 routers connectivity test"
+
+       check_and_log_rt_connectivity 1 2
+       check_and_log_rt_connectivity 2 1
+}
+
+host2gateway_tests()
+{
+       log_section "IPv4/IPv6 connectivity test among hosts and gateway"
+
+       check_and_log_hs2gw_connectivity 1 100
+       check_and_log_hs2gw_connectivity 2 100
+
+       check_and_log_hs2gw_connectivity 3 200
+       check_and_log_hs2gw_connectivity 4 200
+}
+
+host_vpn_tests()
+{
+       log_section "SRv6 VPN connectivity test among hosts in the same tenant"
+
+       check_and_log_hs_connectivity 1 2 100
+       check_and_log_hs_connectivity 2 1 100
+
+       check_and_log_hs_connectivity 3 4 200
+       check_and_log_hs_connectivity 4 3 200
+}
+
+host_vpn_isolation_tests()
+{
+       local i
+       local j
+       local k
+       local tmp
+       local l1="1 2"
+       local l2="3 4"
+       local t1=100
+       local t2=200
+
+       log_section "SRv6 VPN isolation test among hosts in different tentants"
+
+       for k in 0 1; do
+               for i in ${l1}; do
+                       for j in ${l2}; do
+                               check_and_log_hs_isolation ${i} ${t1} ${j} ${t2}
+                       done
+               done
+
+               # let us test the reverse path
+               tmp="${l1}"; l1="${l2}"; l2="${tmp}"
+               tmp=${t1}; t1=${t2}; t2=${tmp}
+       done
+}
+
+if [ "$(id -u)" -ne 0 ];then
+       echo "SKIP: Need root privileges"
+       exit 0
+fi
+
+if [ ! -x "$(command -v ip)" ]; then
+       echo "SKIP: Could not run test without ip tool"
+       exit 0
+fi
+
+modprobe vrf &>/dev/null
+if [ ! -e /proc/sys/net/vrf/strict_mode ]; then
+        echo "SKIP: vrf sysctl does not exist"
+        exit 0
+fi
+
+cleanup &>/dev/null
+
+setup
+
+router_tests
+host2gateway_tests
+host_vpn_tests
+host_vpn_isolation_tests
+
+print_log_test_results
+
+cleanup &>/dev/null
+
+exit ${ret}
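
The script is self-skipping (root, the ip tool, vrf strict_mode) and can be run standalone; setting PAUSE_ON_FAIL=yes stops after each failing check for interactive debugging:

    sudo PAUSE_ON_FAIL=yes \
            tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh
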
index 426d078..112d41d 100644 (file)
 #define TLS_PAYLOAD_MAX_LEN 16384
 #define SOL_TLS 282
 
+struct tls_crypto_info_keys {
+       union {
+               struct tls12_crypto_info_aes_gcm_128 aes128;
+               struct tls12_crypto_info_chacha20_poly1305 chacha20;
+       };
+       size_t len;
+};
+
+static void tls_crypto_info_init(uint16_t tls_version, uint16_t cipher_type,
+                                struct tls_crypto_info_keys *tls12)
+{
+       memset(tls12, 0, sizeof(*tls12));
+
+       switch (cipher_type) {
+       case TLS_CIPHER_CHACHA20_POLY1305:
+               tls12->len = sizeof(struct tls12_crypto_info_chacha20_poly1305);
+               tls12->chacha20.info.version = tls_version;
+               tls12->chacha20.info.cipher_type = cipher_type;
+               break;
+       case TLS_CIPHER_AES_GCM_128:
+               tls12->len = sizeof(struct tls12_crypto_info_aes_gcm_128);
+               tls12->aes128.info.version = tls_version;
+               tls12->aes128.info.cipher_type = cipher_type;
+               break;
+       default:
+               break;
+       }
+}
+
+static void memrnd(void *s, size_t n)
+{
+       int *dword = s;
+       char *byte;
+
+       for (; n >= 4; n -= 4)
+               *dword++ = rand();
+       byte = (void *)dword;
+       while (n--)
+               *byte++ = rand();
+}
+
 FIXTURE(tls_basic)
 {
        int fd, cfd;
@@ -133,33 +174,16 @@ FIXTURE_VARIANT_ADD(tls, 13_chacha)
 
 FIXTURE_SETUP(tls)
 {
-       union {
-               struct tls12_crypto_info_aes_gcm_128 aes128;
-               struct tls12_crypto_info_chacha20_poly1305 chacha20;
-       } tls12;
+       struct tls_crypto_info_keys tls12;
        struct sockaddr_in addr;
        socklen_t len;
        int sfd, ret;
-       size_t tls12_sz;
 
        self->notls = false;
        len = sizeof(addr);
 
-       memset(&tls12, 0, sizeof(tls12));
-       switch (variant->cipher_type) {
-       case TLS_CIPHER_CHACHA20_POLY1305:
-               tls12_sz = sizeof(struct tls12_crypto_info_chacha20_poly1305);
-               tls12.chacha20.info.version = variant->tls_version;
-               tls12.chacha20.info.cipher_type = variant->cipher_type;
-               break;
-       case TLS_CIPHER_AES_GCM_128:
-               tls12_sz = sizeof(struct tls12_crypto_info_aes_gcm_128);
-               tls12.aes128.info.version = variant->tls_version;
-               tls12.aes128.info.cipher_type = variant->cipher_type;
-               break;
-       default:
-               tls12_sz = 0;
-       }
+       tls_crypto_info_init(variant->tls_version, variant->cipher_type,
+                            &tls12);
 
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
@@ -187,7 +211,7 @@ FIXTURE_SETUP(tls)
 
        if (!self->notls) {
                ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12,
-                                tls12_sz);
+                                tls12.len);
                ASSERT_EQ(ret, 0);
        }
 
@@ -200,7 +224,7 @@ FIXTURE_SETUP(tls)
                ASSERT_EQ(ret, 0);
 
                ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12,
-                                tls12_sz);
+                                tls12.len);
                ASSERT_EQ(ret, 0);
        }
 
@@ -308,6 +332,8 @@ TEST_F(tls, recv_max)
        char recv_mem[TLS_PAYLOAD_MAX_LEN];
        char buf[TLS_PAYLOAD_MAX_LEN];
 
+       memrnd(buf, sizeof(buf));
+
        EXPECT_GE(send(self->fd, buf, send_len, 0), 0);
        EXPECT_NE(recv(self->cfd, recv_mem, send_len, 0), -1);
        EXPECT_EQ(memcmp(buf, recv_mem, send_len), 0);
@@ -588,6 +614,8 @@ TEST_F(tls, recvmsg_single_max)
        struct iovec vec;
        struct msghdr hdr;
 
+       memrnd(send_mem, sizeof(send_mem));
+
        EXPECT_EQ(send(self->fd, send_mem, send_len, 0), send_len);
        vec.iov_base = (char *)recv_mem;
        vec.iov_len = TLS_PAYLOAD_MAX_LEN;
@@ -610,6 +638,8 @@ TEST_F(tls, recvmsg_multiple)
        struct msghdr hdr;
        int i;
 
+       memrnd(buf, sizeof(buf));
+
        EXPECT_EQ(send(self->fd, buf, send_len, 0), send_len);
        for (i = 0; i < msg_iovlen; i++) {
                iov_base[i] = (char *)malloc(iov_len);
@@ -634,6 +664,8 @@ TEST_F(tls, single_send_multiple_recv)
        char send_mem[TLS_PAYLOAD_MAX_LEN * 2];
        char recv_mem[TLS_PAYLOAD_MAX_LEN * 2];
 
+       memrnd(send_mem, sizeof(send_mem));
+
        EXPECT_GE(send(self->fd, send_mem, total_len, 0), 0);
        memset(recv_mem, 0, total_len);
 
@@ -834,18 +866,17 @@ TEST_F(tls, bidir)
        int ret;
 
        if (!self->notls) {
-               struct tls12_crypto_info_aes_gcm_128 tls12;
+               struct tls_crypto_info_keys tls12;
 
-               memset(&tls12, 0, sizeof(tls12));
-               tls12.info.version = variant->tls_version;
-               tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128;
+               tls_crypto_info_init(variant->tls_version, variant->cipher_type,
+                                    &tls12);
 
                ret = setsockopt(self->fd, SOL_TLS, TLS_RX, &tls12,
-                                sizeof(tls12));
+                                tls12.len);
                ASSERT_EQ(ret, 0);
 
                ret = setsockopt(self->cfd, SOL_TLS, TLS_TX, &tls12,
-                                sizeof(tls12));
+                                tls12.len);
                ASSERT_EQ(ret, 0);
        }
 
index a8fa641..7f26591 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
 readonly BASE="ns-$(mktemp -u XXXXXX)"
index 2fedc07..11d7cdb 100755 (executable)
@@ -18,7 +18,8 @@ ret=0
 
 cleanup() {
        local ns
-       local -r jobs="$(jobs -p)"
+       local jobs
+       readonly jobs="$(jobs -p)"
        [ -n "${jobs}" ] && kill -1 ${jobs} 2>/dev/null
        rm -f $STATS
 
@@ -108,7 +109,7 @@ chk_gro() {
 
 if [ ! -f ../bpf/xdp_dummy.o ]; then
        echo "Missing xdp_dummy helper. Build bpf selftest first"
-       exit -1
+       exit 1
 fi
 
 create_ns
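
The exit -1 change above is a portability fix: POSIX exit takes a value in the 0-255 range, so -1 is either masked or rejected outright depending on the shell. A quick demonstration of the divergence:

    bash -c 'exit -1'; echo $?    # bash masks the value to 8 bits: 255
    dash -c 'exit -1'; echo $?    # dash rejects the operand with an error
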
index 3171069..cd6430b 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for netfilter selftests
 
-TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \
+TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
        conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
        nft_concat_range.sh nft_conntrack_helper.sh \
        nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
diff --git a/tools/testing/selftests/netfilter/nft_fib.sh b/tools/testing/selftests/netfilter/nft_fib.sh
new file mode 100755 (executable)
index 0000000..6caf6ac
--- /dev/null
@@ -0,0 +1,221 @@
+#!/bin/bash
+#
+# This tests the fib expression.
+#
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns1="ns1-$sfx"
+ns2="ns2-$sfx"
+nsrouter="nsrouter-$sfx"
+timeout=4
+
+log_netns=$(sysctl -n net.netfilter.nf_log_all_netns)
+
+cleanup()
+{
+       ip netns del ${ns1}
+       ip netns del ${ns2}
+       ip netns del ${nsrouter}
+
+       [ $log_netns -eq 0 ] && sysctl -q net.netfilter.nf_log_all_netns=$log_netns
+}
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without nft tool"
+       exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without ip tool"
+       exit $ksft_skip
+fi
+
+ip netns add ${nsrouter}
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not create net namespace"
+       exit $ksft_skip
+fi
+
+trap cleanup EXIT
+
+dmesg | grep -q ' nft_rpfilter: '
+if [ $? -eq 0 ]; then
+       dmesg -c | grep ' nft_rpfilter: '
+       echo "WARN: a previous test run has failed" 1>&2
+fi
+
+sysctl -q net.netfilter.nf_log_all_netns=1
+ip netns add ${ns1}
+ip netns add ${ns2}
+
+load_ruleset() {
+       local netns=$1
+
+ip netns exec ${netns} nft -f /dev/stdin <<EOF
+table inet filter {
+       chain prerouting {
+               type filter hook prerouting priority 0; policy accept;
+               fib saddr . iif oif missing counter log prefix "$netns nft_rpfilter: " drop
+       }
+}
+EOF
+}
+
+load_ruleset_count() {
+       local netns=$1
+
+ip netns exec ${netns} nft -f /dev/stdin <<EOF
+table inet filter {
+       chain prerouting {
+               type filter hook prerouting priority 0; policy accept;
+               ip daddr 1.1.1.1 fib saddr . iif oif missing counter drop
+               ip6 daddr 1c3::c01d fib saddr . iif oif missing counter drop
+       }
+}
+EOF
+}
+
+check_drops() {
+       dmesg | grep -q ' nft_rpfilter: '
+       if [ $? -eq 0 ]; then
+               dmesg | grep ' nft_rpfilter: '
+               echo "FAIL: rpfilter did drop packets"
+               return 1
+       fi
+
+       return 0
+}
+
+check_fib_counter() {
+       local want=$1
+       local ns=$2
+       local address=$3
+
+       line=$(ip netns exec ${ns} nft list table inet filter | grep 'fib saddr . iif' | grep $address | grep "packets $want" )
+       ret=$?
+
+       if [ $ret -ne 0 ];then
+               echo "Netns $ns fib counter doesn't match expected packet count of $want for $address" 1>&2
+               ip netns exec ${ns} nft list table inet filter
+               return 1
+       fi
+
+       if [ $want -gt 0 ]; then
+               echo "PASS: fib expression did drop packets for $address"
+       fi
+
+       return 0
+}
+
+load_ruleset ${nsrouter}
+load_ruleset ${ns1}
+load_ruleset ${ns2}
+
+ip link add veth0 netns ${nsrouter} type veth peer name eth0 netns ${ns1} > /dev/null 2>&1
+if [ $? -ne 0 ];then
+    echo "SKIP: No virtual ethernet pair device support in kernel"
+    exit $ksft_skip
+fi
+ip link add veth1 netns ${nsrouter} type veth peer name eth0 netns ${ns2}
+
+ip -net ${nsrouter} link set lo up
+ip -net ${nsrouter} link set veth0 up
+ip -net ${nsrouter} addr add 10.0.1.1/24 dev veth0
+ip -net ${nsrouter} addr add dead:1::1/64 dev veth0
+
+ip -net ${nsrouter} link set veth1 up
+ip -net ${nsrouter} addr add 10.0.2.1/24 dev veth1
+ip -net ${nsrouter} addr add dead:2::1/64 dev veth1
+
+ip -net ${ns1} link set lo up
+ip -net ${ns1} link set eth0 up
+
+ip -net ${ns2} link set lo up
+ip -net ${ns2} link set eth0 up
+
+ip -net ${ns1} addr add 10.0.1.99/24 dev eth0
+ip -net ${ns1} addr add dead:1::99/64 dev eth0
+ip -net ${ns1} route add default via 10.0.1.1
+ip -net ${ns1} route add default via dead:1::1
+
+ip -net ${ns2} addr add 10.0.2.99/24 dev eth0
+ip -net ${ns2} addr add dead:2::99/64 dev eth0
+ip -net ${ns2} route add default via 10.0.2.1
+ip -net ${ns2} route add default via dead:2::1
+
+test_ping() {
+  local daddr4=$1
+  local daddr6=$2
+
+  ip netns exec ${ns1} ping -c 1 -q $daddr4 > /dev/null
+  ret=$?
+  if [ $ret -ne 0 ];then
+       check_drops
+       echo "FAIL: ${ns1} cannot reach $daddr4, ret $ret" 1>&2
+       return 1
+  fi
+
+  ip netns exec ${ns1} ping -c 3 -q $daddr6 > /dev/null
+  ret=$?
+  if [ $ret -ne 0 ];then
+       check_drops
+       echo "FAIL: ${ns1} cannot reach $daddr6, ret $ret" 1>&2
+       return 1
+  fi
+
+  return 0
+}
+
+ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+sleep 3
+
+test_ping 10.0.2.1 dead:2::1 || exit 1
+check_drops || exit 1
+
+test_ping 10.0.2.99 dead:2::99 || exit 1
+check_drops || exit 1
+
+echo "PASS: fib expression did not cause unwanted packet drops"
+
+ip netns exec ${nsrouter} nft flush table inet filter
+
+ip -net ${ns1} route del default
+ip -net ${ns1} -6 route del default
+
+ip -net ${ns1} addr del 10.0.1.99/24 dev eth0
+ip -net ${ns1} addr del dead:1::99/64 dev eth0
+
+ip -net ${ns1} addr add 10.0.2.99/24 dev eth0
+ip -net ${ns1} addr add dead:2::99/64 dev eth0
+
+ip -net ${ns1} route add default via 10.0.2.1
+ip -net ${ns1} -6 route add default via dead:2::1
+
+ip -net ${nsrouter} addr add dead:2::1/64 dev veth0
+
+# switch to a ruleset that doesn't log; this time
+# it is expected that this does drop the packets.
+load_ruleset_count ${nsrouter}
+
+# ns1 has a default route, but nsrouter does not.
+# must not check return value, ping to 1.1.1.1 will
+# fail.
+check_fib_counter 0 ${nsrouter} 1.1.1.1 || exit 1
+check_fib_counter 0 ${nsrouter} 1c3::c01d || exit 1
+
+ip netns exec ${ns1} ping -c 1 -W 1 -q 1.1.1.1 > /dev/null
+check_fib_counter 1 ${nsrouter} 1.1.1.1 || exit 1
+
+sleep 2
+ip netns exec ${ns1} ping -c 3 -q 1c3::c01d > /dev/null
+check_fib_counter 3 ${nsrouter} 1c3::c01d || exit 1
+
+exit 0
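
After a run, the counters that check_fib_counter() greps for can also be inspected by hand; each rule counts packets whose reverse-path lookup (fib saddr . iif, oif missing) found no route back to the source:

    ip netns exec "$nsrouter" nft list table inet filter | \
            grep 'fib saddr . iif'
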
index bed4b53..8f3e72e 100644 (file)
@@ -10,6 +10,7 @@
 /proc-self-map-files-002
 /proc-self-syscall
 /proc-self-wchan
+/proc-subset-pid
 /proc-uptime-001
 /proc-uptime-002
 /read
index 229ee18..254136e 100644 (file)
@@ -29,22 +29,26 @@ class SubPlugin(TdcPlugin):
             return
 
         # Check for required fields
-        scapyinfo = self.args.caseinfo['scapy']
-        scapy_keys = ['iface', 'count', 'packet']
-        missing_keys = []
-        keyfail = False
-        for k in scapy_keys:
-            if k not in scapyinfo:
-                keyfail = True
-                missing_keys.add(k)
-        if keyfail:
-            print('{}: Scapy block present in the test, but is missing info:'
-                .format(self.sub_class))
-            print('{}'.format(missing_keys))
-
-        pkt = eval(scapyinfo['packet'])
-        if '$' in scapyinfo['iface']:
-            tpl = Template(scapyinfo['iface'])
-            scapyinfo['iface'] = tpl.safe_substitute(NAMES)
-        for count in range(scapyinfo['count']):
-            sendp(pkt, iface=scapyinfo['iface'])
+        lscapyinfo = self.args.caseinfo['scapy']
+        if type(lscapyinfo) != list:
+            lscapyinfo = [ lscapyinfo, ]
+
+        for scapyinfo in lscapyinfo:
+            scapy_keys = ['iface', 'count', 'packet']
+            missing_keys = []
+            keyfail = False
+            for k in scapy_keys:
+                if k not in scapyinfo:
+                    keyfail = True
+                    missing_keys.append(k)
+            if keyfail:
+                print('{}: Scapy block present in the test, but is missing info:'
+                    .format(self.sub_class))
+                print('{}'.format(missing_keys))
+
+            pkt = eval(scapyinfo['packet'])
+            if '$' in scapyinfo['iface']:
+                tpl = Template(scapyinfo['iface'])
+                scapyinfo['iface'] = tpl.safe_substitute(NAMES)
+            for count in range(scapyinfo['count']):
+                sendp(pkt, iface=scapyinfo['iface'])
index 4202e95..bd843ab 100644 (file)
         "teardown": [
             "$TC actions flush action ct"
         ]
+    },
+    {
+        "id": "3992",
+        "name": "Add ct action triggering DNAT tuple conflict",
+        "category": [
+            "actions",
+            "ct",
+           "scapy"
+        ],
+       "plugins": {
+               "requires": [
+                       "nsPlugin",
+                       "scapyPlugin"
+               ]
+       },
+        "setup": [
+            [
+                "$TC qdisc del dev $DEV1 ingress",
+                0,
+                1,
+               2,
+                255
+            ],
+           "$TC qdisc add dev $DEV1 ingress"
+        ],
+        "cmdUnderTest": "$TC filter add dev $DEV1 ingress protocol ip prio 1 flower ct_state -trk action ct commit nat dst addr 20.0.0.1 port 10 pipe action drop",
+       "scapy": [
+           {
+               "iface": "$DEV0",
+               "count": 1,
+               "packet": "Ether(type=0x800)/IP(src='10.0.0.10',dst='10.0.0.10')/TCP(sport=5000,dport=10)"
+           },
+           {
+               "iface": "$DEV0",
+               "count": 1,
+               "packet": "Ether(type=0x800)/IP(src='10.0.0.10',dst='10.0.0.20')/TCP(sport=5000,dport=10)"
+           }
+       ],
+        "expExitCode": "0",
+        "verifyCmd": "cat /proc/net/nf_conntrack",
+        "matchPattern": "dst=10.0.0.20",
+        "matchCount": "1",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 ingress"
+        ]
     }
 ]
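
This is the first test case to rely on the scapyPlugin change above: "scapy" may now be a list, and the two packets are injected in order so that the second one hits the DNAT tuple conflict. Assuming tdc's usual per-ID selector, the case can be run on its own with:

    cd tools/testing/selftests/tc-testing
    sudo ./tdc.py -e 3992
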
index 6b4feb9..6a6bc7a 100644 (file)
@@ -307,6 +307,7 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
 {
        return kvm_make_all_cpus_request_except(kvm, req, NULL);
 }
+EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
 
 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
 void kvm_flush_remote_tlbs(struct kvm *kvm)
@@ -2929,6 +2930,8 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
                goto out;
        if (signal_pending(current))
                goto out;
+       if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
+               goto out;
 
        ret = 0;
 out:
@@ -2973,8 +2976,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
                                goto out;
                        }
                        poll_end = cur = ktime_get();
-               } while (single_task_running() && !need_resched() &&
-                        ktime_before(cur, stop));
+               } while (kvm_vcpu_can_poll(cur, stop));
        }
 
        prepare_to_rcuwait(&vcpu->wait);
index c9bb395..28fda42 100644 (file)
@@ -40,21 +40,17 @@ static int __connect(struct irq_bypass_producer *prod,
        if (prod->add_consumer)
                ret = prod->add_consumer(prod, cons);
 
-       if (ret)
-               goto err_add_consumer;
-
-       ret = cons->add_producer(cons, prod);
-       if (ret)
-               goto err_add_producer;
+       if (!ret) {
+               ret = cons->add_producer(cons, prod);
+               if (ret && prod->del_consumer)
+                       prod->del_consumer(prod, cons);
+       }
 
        if (cons->start)
                cons->start(cons);
        if (prod->start)
                prod->start(prod);
-err_add_producer:
-       if (prod->del_consumer)
-               prod->del_consumer(prod, cons);
-err_add_consumer:
+
        return ret;
 }